target/sparc/translate.c
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#include "asi.h"

#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */

/* global register indexes */
static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"

typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
    bool singlestep;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
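
/* Note (added): g1/g2 flag that c1/c2 alias live globals (e.g. cpu_cc_src)
   which free_compare() must not release; plain temporaries leave them
   false so they are freed normally. */
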
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
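
/* Worked example (added): the cond field of a Bicc instruction occupies
   bits 28..25 in manual (GET_FIELD_SP) order, i.e. GET_FIELD_SP(insn, 25, 28),
   which expands to GET_FIELD(insn, 3, 6) == (insn >> 25) & 0xf in the
   non-native order used by the decoder below. */
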
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
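
/* Example (added, sparc64): a 5-bit rd field of 3 (0b00011) names the
   double register %f34, since bit 0 of the field architecturally supplies
   bit 5 of the register number: DFPREG(3) == 34, backed by cpu_fpr[34 / 2]. */
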
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
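
/* Example (added): sign_extend(0x1fff, 13) == -1; this is how the 13-bit
   simm13 immediate extracted with GET_FIELDs(x, 19, 31) becomes a negative
   offset. */
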
#define IS_IMM (insn & (1<<13))

static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}

#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}

static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}

static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
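
/* Note (added): direct block chaining via goto_tb is only safe while both
   pc and npc stay on the same guest page as this TB, and never while
   single-stepping; anything else falls back to exit_tb(0) below so the
   next pc is looked up afresh. */
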
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}

// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}

static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
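
/* Note (added): MULScc below implements one step of the V8 integer
   multiply.  %y holds the multiplier and rs1 the running partial product:
   each step conditionally adds rs2 (only when bit 0 of %y is set), shifts
   (N ^ V) into bit 31 of the right-shifted rs1, and shifts rs1's old bit 0
   into bit 31 of %y.  The old-op comments in the body trace this sequence. */
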
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
   FPSR bit field FCC1 | FCC0:
    0 =
    1 <
    2 >
    3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}

static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->is_br = 1;
}

static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);
    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }
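    /* Note (added): the offset is added to FSR_FCC0_SHIFT by the
       gen_mov_reg_FCC* helpers, selecting which fcc field is read:
       fcc0 sits at FSR bits 11:10, while fcc1..fcc3 sit at bits 33:32,
       35:34 and 37:36 on sparc64, hence 32 - 10, 34 - 10 and 36 - 10. */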

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif

static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}

static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, TCGMemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
}
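
/* Note (added): SWAP and LDSTUB (below) both reduce to an atomic exchange.
   SWAP exchanges a 32-bit word with a register, while LDSTUB loads a byte
   and stores 0xff in its place -- the classic SPARC spinlock primitive. */
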
static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_const_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
    tcg_temp_free(m1);
}

/* asi moves */
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    TCGMemOp memop;
} DisasASI;

static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:         /* Bypass */
        case ASI_REAL_IO:      /* Bypass, non-cacheable */
        case ASI_REAL_L:       /* Bypass LE */
        case ASI_REAL_IO_L:    /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }

        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
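
/* Example (added): on a pre-v9 CPU, a supervisor-mode
   "lda [%o0] ASI_USERDATA, %o1" resolves above to
   { GET_ASI_DIRECT, ASI_USERDATA, MMU_USER_IDX, MO_TEUL }, so the
   access is emitted inline against the user MMU context. */
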
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }
}

2315 static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
2316 int insn, TCGMemOp memop)
2318 DisasASI da = get_asi(dc, insn, memop);
2320 switch (da.type) {
2321 case GET_ASI_EXCP:
2322 break;
2323 case GET_ASI_DTWINX: /* Reserved for stda. */
2324 #ifndef TARGET_SPARC64
2325 gen_exception(dc, TT_ILL_INSN);
2326 break;
2327 #else
2328 if (!(dc->def->features & CPU_FEATURE_HYPV)) {
2329 /* Pre OpenSPARC CPUs don't have these */
2330 gen_exception(dc, TT_ILL_INSN);
2331 return;
2333 /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
2334 * are ST_BLKINIT_ ASIs */
2335 /* fall through */
2336 #endif
2337 case GET_ASI_DIRECT:
2338 gen_address_mask(dc, addr);
2339 tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
2340 break;
2341 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2342 case GET_ASI_BCOPY:
2343 /* Copy 32 bytes from the address in SRC to ADDR. */
2344 /* ??? The original qemu code suggests 4-byte alignment, dropping
2345 the low bits, but the only place I can see this used is in the
2346 Linux kernel with 32 byte alignment, which would make more sense
2347 as a cacheline-style operation. */
2349 TCGv saddr = tcg_temp_new();
2350 TCGv daddr = tcg_temp_new();
2351 TCGv four = tcg_const_tl(4);
2352 TCGv_i32 tmp = tcg_temp_new_i32();
2353 int i;
2355 tcg_gen_andi_tl(saddr, src, -4);
2356 tcg_gen_andi_tl(daddr, addr, -4);
2357 for (i = 0; i < 32; i += 4) {
2358 /* Since the loads and stores are paired, allow the
2359 copy to happen in the host endianness. */
2360 tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
2361 tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
2362 tcg_gen_add_tl(saddr, saddr, four);
2363 tcg_gen_add_tl(daddr, daddr, four);
2366 tcg_temp_free(saddr);
2367 tcg_temp_free(daddr);
2368 tcg_temp_free(four);
2369 tcg_temp_free_i32(tmp);
2371 break;
2372 #endif
2373 default:
2375 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2376 TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);
2378 save_state(dc);
2379 #ifdef TARGET_SPARC64
2380 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
2381 #else
2383 TCGv_i64 t64 = tcg_temp_new_i64();
2384 tcg_gen_extu_tl_i64(t64, src);
2385 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2386 tcg_temp_free_i64(t64);
2388 #endif
2389 tcg_temp_free_i32(r_mop);
2390 tcg_temp_free_i32(r_asi);
2392 /* A write to a TLB register may alter page maps. End the TB. */
2393 dc->npc = DYNAMIC_PC;
2395 break;
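/* SWAPA: exchange r[rd] with a 32-bit word in the alternate space.
   Only the direct-mapped case is implemented (via gen_swap()); any
   other ASI raises a data access exception here.  */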
2399 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2400 TCGv addr, int insn)
2402 DisasASI da = get_asi(dc, insn, MO_TEUL);
2404 switch (da.type) {
2405 case GET_ASI_EXCP:
2406 break;
2407 case GET_ASI_DIRECT:
2408 gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
2409 break;
2410 default:
2411 /* ??? Should be DAE_invalid_asi. */
2412 gen_exception(dc, TT_DATA_ACCESS);
2413 break;
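/* CASA: compare-and-swap.  r[rd] is swapped into memory when the word
   at addr equals cmpv; either way the prior memory contents come back
   in r[rd] via tcg_gen_atomic_cmpxchg_tl().  */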
2417 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2418 int insn, int rd)
2420 DisasASI da = get_asi(dc, insn, MO_TEUL);
2421 TCGv oldv;
2423 switch (da.type) {
2424 case GET_ASI_EXCP:
2425 return;
2426 case GET_ASI_DIRECT:
2427 oldv = tcg_temp_new();
2428 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2429 da.mem_idx, da.memop);
2430 gen_store_gpr(dc, rd, oldv);
2431 tcg_temp_free(oldv);
2432 break;
2433 default:
2434 /* ??? Should be DAE_invalid_asi. */
2435 gen_exception(dc, TT_DATA_ACCESS);
2436 break;
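/* LDSTUB: the classic SPARC lock primitive; the byte at addr is read
   into r[rd] and simultaneously set to 0xff.  The non-direct fallback
   below does this as two helper calls, which is not atomic, so under
   MTTCG (parallel_cpus) the TB instead exits to serialized
   execution.  */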
2440 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2442 DisasASI da = get_asi(dc, insn, MO_UB);
2444 switch (da.type) {
2445 case GET_ASI_EXCP:
2446 break;
2447 case GET_ASI_DIRECT:
2448 gen_ldstub(dc, dst, addr, da.mem_idx);
2449 break;
2450 default:
2451 /* ??? In theory, this should raise DAE_invalid_asi.
2452 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2453 if (parallel_cpus) {
2454 gen_helper_exit_atomic(cpu_env);
2455 } else {
2456 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2457 TCGv_i32 r_mop = tcg_const_i32(MO_UB);
2458 TCGv_i64 s64, t64;
2460 save_state(dc);
2461 t64 = tcg_temp_new_i64();
2462 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2464 s64 = tcg_const_i64(0xff);
2465 gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
2466 tcg_temp_free_i64(s64);
2467 tcg_temp_free_i32(r_mop);
2468 tcg_temp_free_i32(r_asi);
2470 tcg_gen_trunc_i64_tl(dst, t64);
2471 tcg_temp_free_i64(t64);
2473 /* End the TB. */
2474 dc->npc = DYNAMIC_PC;
2476 break;
2479 #endif
2481 #ifdef TARGET_SPARC64
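/* Floating-point loads from an alternate space.  SIZE is 4/8/16 for
   ldfa/lddfa/ldqfa; note that the block (ASI_BLK_*) and short
   (ASI_FL*) forms are only accepted for the 8-byte variant.  */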
2482 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2483 int insn, int size, int rd)
2485 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2486 TCGv_i32 d32;
2487 TCGv_i64 d64;
2489 switch (da.type) {
2490 case GET_ASI_EXCP:
2491 break;
2493 case GET_ASI_DIRECT:
2494 gen_address_mask(dc, addr);
2495 switch (size) {
2496 case 4:
2497 d32 = gen_dest_fpr_F(dc);
2498 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
2499 gen_store_fpr_F(dc, rd, d32);
2500 break;
2501 case 8:
2502 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2503 da.memop | MO_ALIGN_4);
2504 break;
2505 case 16:
2506 d64 = tcg_temp_new_i64();
2507 tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
2508 tcg_gen_addi_tl(addr, addr, 8);
2509 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
2510 da.memop | MO_ALIGN_4);
2511 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2512 tcg_temp_free_i64(d64);
2513 break;
2514 default:
2515 g_assert_not_reached();
2517 break;
2519 case GET_ASI_BLOCK:
2520 /* Valid for lddfa on aligned registers only. */
2521 if (size == 8 && (rd & 7) == 0) {
2522 TCGMemOp memop;
2523 TCGv eight;
2524 int i;
2526 gen_address_mask(dc, addr);
2528 /* The first operation checks required alignment. */
2529 memop = da.memop | MO_ALIGN_64;
2530 eight = tcg_const_tl(8);
2531 for (i = 0; ; ++i) {
2532 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2533 da.mem_idx, memop);
2534 if (i == 7) {
2535 break;
2537 tcg_gen_add_tl(addr, addr, eight);
2538 memop = da.memop;
2540 tcg_temp_free(eight);
2541 } else {
2542 gen_exception(dc, TT_ILL_INSN);
2544 break;
2546 case GET_ASI_SHORT:
2547 /* Valid for lddfa only. */
2548 if (size == 8) {
2549 gen_address_mask(dc, addr);
2550 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2551 } else {
2552 gen_exception(dc, TT_ILL_INSN);
2554 break;
2556 default:
2558 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2559 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2561 save_state(dc);
2562 /* According to the table in the UA2011 manual, the only
2563 other asis that are valid for ldfa/lddfa/ldqfa are
2564 the NO_FAULT asis. We still need a helper for these,
2565 but we can just use the integer asi helper for them. */
2566 switch (size) {
2567 case 4:
2568 d64 = tcg_temp_new_i64();
2569 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2570 d32 = gen_dest_fpr_F(dc);
2571 tcg_gen_extrl_i64_i32(d32, d64);
2572 tcg_temp_free_i64(d64);
2573 gen_store_fpr_F(dc, rd, d32);
2574 break;
2575 case 8:
2576 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2577 break;
2578 case 16:
2579 d64 = tcg_temp_new_i64();
2580 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2581 tcg_gen_addi_tl(addr, addr, 8);
2582 gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2583 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2584 tcg_temp_free_i64(d64);
2585 break;
2586 default:
2587 g_assert_not_reached();
2589 tcg_temp_free_i32(r_mop);
2590 tcg_temp_free_i32(r_asi);
2592 break;
2596 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2597 int insn, int size, int rd)
2599 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2600 TCGv_i32 d32;
2602 switch (da.type) {
2603 case GET_ASI_EXCP:
2604 break;
2606 case GET_ASI_DIRECT:
2607 gen_address_mask(dc, addr);
2608 switch (size) {
2609 case 4:
2610 d32 = gen_load_fpr_F(dc, rd);
2611 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
2612 break;
2613 case 8:
2614 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2615 da.memop | MO_ALIGN_4);
2616 break;
2617 case 16:
2618 /* Only 4-byte alignment is required. However, it is legal for the
2619 cpu to signal the alignment fault, and the OS trap handler is
2620 required to fix it up. Requiring 16-byte alignment here avoids
2621 having to probe the second page before performing the first
2622 write. */
2623 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2624 da.memop | MO_ALIGN_16);
2625 tcg_gen_addi_tl(addr, addr, 8);
2626 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2627 break;
2628 default:
2629 g_assert_not_reached();
2631 break;
2633 case GET_ASI_BLOCK:
2634 /* Valid for stdfa on aligned registers only. */
2635 if (size == 8 && (rd & 7) == 0) {
2636 TCGMemOp memop;
2637 TCGv eight;
2638 int i;
2640 gen_address_mask(dc, addr);
2642 /* The first operation checks required alignment. */
2643 memop = da.memop | MO_ALIGN_64;
2644 eight = tcg_const_tl(8);
2645 for (i = 0; ; ++i) {
2646 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2647 da.mem_idx, memop);
2648 if (i == 7) {
2649 break;
2651 tcg_gen_add_tl(addr, addr, eight);
2652 memop = da.memop;
2654 tcg_temp_free(eight);
2655 } else {
2656 gen_exception(dc, TT_ILL_INSN);
2658 break;
2660 case GET_ASI_SHORT:
2661 /* Valid for stdfa only. */
2662 if (size == 8) {
2663 gen_address_mask(dc, addr);
2664 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2665 } else {
2666 gen_exception(dc, TT_ILL_INSN);
2668 break;
2670 default:
2671 /* According to the table in the UA2011 manual, the only
2672 other asis that are valid for stfa/stdfa/stqfa are
2673 the PST* asis, which aren't currently handled. */
2674 gen_exception(dc, TT_ILL_INSN);
2675 break;
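/* LDDA: 64-bit load into the even/odd register pair rd/rd+1.  The
   DTWINX ASIs fetch two 64-bit words with 16-byte alignment; the
   other forms fetch one 64-bit word and split it across the pair.  */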
2679 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2681 DisasASI da = get_asi(dc, insn, MO_TEQ);
2682 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2683 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2685 switch (da.type) {
2686 case GET_ASI_EXCP:
2687 return;
2689 case GET_ASI_DTWINX:
2690 gen_address_mask(dc, addr);
2691 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2692 tcg_gen_addi_tl(addr, addr, 8);
2693 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2694 break;
2696 case GET_ASI_DIRECT:
2698 TCGv_i64 tmp = tcg_temp_new_i64();
2700 gen_address_mask(dc, addr);
2701 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
2703 /* Note that LE ldda acts as if each 32-bit register
2704 result is byte swapped. Having just performed one
2705 64-bit bswap, we now need to swap the writebacks. */
2706 if ((da.memop & MO_BSWAP) == MO_TE) {
2707 tcg_gen_extr32_i64(lo, hi, tmp);
2708 } else {
2709 tcg_gen_extr32_i64(hi, lo, tmp);
2711 tcg_temp_free_i64(tmp);
2713 break;
2715 default:
2716 /* ??? In theory we've handled all of the ASIs that are valid
2717 for ldda, and this should raise DAE_invalid_asi. However,
2718 real hardware allows others. This can be seen with e.g.
2719 FreeBSD 10.3 wrt ASI_IC_TAG. */
2721 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2722 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2723 TCGv_i64 tmp = tcg_temp_new_i64();
2725 save_state(dc);
2726 gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
2727 tcg_temp_free_i32(r_asi);
2728 tcg_temp_free_i32(r_mop);
2730 /* See above. */
2731 if ((da.memop & MO_BSWAP) == MO_TE) {
2732 tcg_gen_extr32_i64(lo, hi, tmp);
2733 } else {
2734 tcg_gen_extr32_i64(hi, lo, tmp);
2736 tcg_temp_free_i64(tmp);
2738 break;
2741 gen_store_gpr(dc, rd, hi);
2742 gen_store_gpr(dc, rd + 1, lo);
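/* STDA is the mirror image: rd/rd+1 are concatenated (in an order
   that depends on the endianness of the access, see below) and
   stored as a single 64-bit access, except for the two-word DTWINX
   case.  */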
2745 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2746 int insn, int rd)
2748 DisasASI da = get_asi(dc, insn, MO_TEQ);
2749 TCGv lo = gen_load_gpr(dc, rd + 1);
2751 switch (da.type) {
2752 case GET_ASI_EXCP:
2753 break;
2755 case GET_ASI_DTWINX:
2756 gen_address_mask(dc, addr);
2757 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2758 tcg_gen_addi_tl(addr, addr, 8);
2759 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2760 break;
2762 case GET_ASI_DIRECT:
2764 TCGv_i64 t64 = tcg_temp_new_i64();
2766 /* Note that LE stda acts as if each 32-bit register result is
2767 byte swapped. We will perform one 64-bit LE store, so now
2768 we must swap the order of the construction. */
2769 if ((da.memop & MO_BSWAP) == MO_TE) {
2770 tcg_gen_concat32_i64(t64, lo, hi);
2771 } else {
2772 tcg_gen_concat32_i64(t64, hi, lo);
2774 gen_address_mask(dc, addr);
2775 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2776 tcg_temp_free_i64(t64);
2778 break;
2780 default:
2781 /* ??? In theory we've handled all of the ASIs that are valid
2782 for stda, and this should raise DAE_invalid_asi. */
2784 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2785 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2786 TCGv_i64 t64 = tcg_temp_new_i64();
2788 /* See above. */
2789 if ((da.memop & MO_BSWAP) == MO_TE) {
2790 tcg_gen_concat32_i64(t64, lo, hi);
2791 } else {
2792 tcg_gen_concat32_i64(t64, hi, lo);
2795 save_state(dc);
2796 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2797 tcg_temp_free_i32(r_mop);
2798 tcg_temp_free_i32(r_asi);
2799 tcg_temp_free_i64(t64);
2801 break;
2805 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2806 int insn, int rd)
2808 DisasASI da = get_asi(dc, insn, MO_TEQ);
2809 TCGv oldv;
2811 switch (da.type) {
2812 case GET_ASI_EXCP:
2813 return;
2814 case GET_ASI_DIRECT:
2815 oldv = tcg_temp_new();
2816 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2817 da.mem_idx, da.memop);
2818 gen_store_gpr(dc, rd, oldv);
2819 tcg_temp_free(oldv);
2820 break;
2821 default:
2822 /* ??? Should be DAE_invalid_asi. */
2823 gen_exception(dc, TT_DATA_ACCESS);
2824 break;
2828 #elif !defined(CONFIG_USER_ONLY)
2829 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2831 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2832 whereby "rd + 1" elicits "error: array subscript is above array".
2833 Since we have already asserted that rd is even, the semantics
2834 are unchanged. */
2835 TCGv lo = gen_dest_gpr(dc, rd | 1);
2836 TCGv hi = gen_dest_gpr(dc, rd);
2837 TCGv_i64 t64 = tcg_temp_new_i64();
2838 DisasASI da = get_asi(dc, insn, MO_TEQ);
2840 switch (da.type) {
2841 case GET_ASI_EXCP:
2842 tcg_temp_free_i64(t64);
2843 return;
2844 case GET_ASI_DIRECT:
2845 gen_address_mask(dc, addr);
2846 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
2847 break;
2848 default:
2850 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2851 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2853 save_state(dc);
2854 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2855 tcg_temp_free_i32(r_mop);
2856 tcg_temp_free_i32(r_asi);
2858 break;
2861 tcg_gen_extr_i64_i32(lo, hi, t64);
2862 tcg_temp_free_i64(t64);
2863 gen_store_gpr(dc, rd | 1, lo);
2864 gen_store_gpr(dc, rd, hi);
2867 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2868 int insn, int rd)
2870 DisasASI da = get_asi(dc, insn, MO_TEQ);
2871 TCGv lo = gen_load_gpr(dc, rd + 1);
2872 TCGv_i64 t64 = tcg_temp_new_i64();
2874 tcg_gen_concat_tl_i64(t64, lo, hi);
2876 switch (da.type) {
2877 case GET_ASI_EXCP:
2878 break;
2879 case GET_ASI_DIRECT:
2880 gen_address_mask(dc, addr);
2881 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2882 break;
2883 case GET_ASI_BFILL:
2884 /* Store 32 bytes to ADDR, replicating the 8 bytes of T64 four times. */
2885 /* ??? The original qemu code suggests 8-byte alignment, dropping
2886 the low bits, but the only place I can see this used is in the
2887 Linux kernel with 32 byte alignment, which would make more sense
2888 as a cacheline-style operation. */
2890 TCGv d_addr = tcg_temp_new();
2891 TCGv eight = tcg_const_tl(8);
2892 int i;
2894 tcg_gen_andi_tl(d_addr, addr, -8);
2895 for (i = 0; i < 32; i += 8) {
2896 tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
2897 tcg_gen_add_tl(d_addr, d_addr, eight);
2900 tcg_temp_free(d_addr);
2901 tcg_temp_free(eight);
2903 break;
2904 default:
2906 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2907 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2909 save_state(dc);
2910 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2911 tcg_temp_free_i32(r_mop);
2912 tcg_temp_free_i32(r_asi);
2914 break;
2917 tcg_temp_free_i64(t64);
2919 #endif
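/* Operand decoding: rs1 always comes from a register, while rs2 is
   either a register or, when the i bit (bit 13) is set, the
   sign-extended 13-bit immediate.  E.g. "add %g1, 5, %g2" takes the
   immediate path in get_src2().  */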
2921 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2923 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2924 return gen_load_gpr(dc, rs1);
2927 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2929 if (IS_IMM) { /* immediate */
2930 target_long simm = GET_FIELDs(insn, 19, 31);
2931 TCGv t = get_temp_tl(dc);
2932 tcg_gen_movi_tl(t, simm);
2933 return t;
2934 } else { /* register */
2935 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2936 return gen_load_gpr(dc, rs2);
2940 #ifdef TARGET_SPARC64
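/* Conditional FP moves (FMOVcc and friends).  Each variant reduces
   the DisasCompare to a flag and selects between the old and new
   values with movcond, so no branch is generated.  */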
2941 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2943 TCGv_i32 c32, zero, dst, s1, s2;
2945 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2946 or fold the comparison down to 32 bits and use movcond_i32. Choose
2947 the latter. */
2948 c32 = tcg_temp_new_i32();
2949 if (cmp->is_bool) {
2950 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2951 } else {
2952 TCGv_i64 c64 = tcg_temp_new_i64();
2953 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2954 tcg_gen_extrl_i64_i32(c32, c64);
2955 tcg_temp_free_i64(c64);
2958 s1 = gen_load_fpr_F(dc, rs);
2959 s2 = gen_load_fpr_F(dc, rd);
2960 dst = gen_dest_fpr_F(dc);
2961 zero = tcg_const_i32(0);
2963 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2965 tcg_temp_free_i32(c32);
2966 tcg_temp_free_i32(zero);
2967 gen_store_fpr_F(dc, rd, dst);
2970 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2972 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2973 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2974 gen_load_fpr_D(dc, rs),
2975 gen_load_fpr_D(dc, rd));
2976 gen_store_fpr_D(dc, rd, dst);
2979 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2981 int qd = QFPREG(rd);
2982 int qs = QFPREG(rs);
2984 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2985 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2986 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2987 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2989 gen_update_fprs_dirty(dc, qd);
2992 #ifndef CONFIG_USER_ONLY
2993 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2995 TCGv_i32 r_tl = tcg_temp_new_i32();
2997 /* load env->tl into r_tl */
2998 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
3000 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
3001 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
3003 /* calculate offset to current trap state from env->ts, reuse r_tl */
3004 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
3005 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
3007 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
3009 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
3010 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
3011 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
3012 tcg_temp_free_ptr(r_tl_tmp);
3015 tcg_temp_free_i32(r_tl);
3017 #endif
3019 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
3020 int width, bool cc, bool left)
3022 TCGv lo1, lo2, t1, t2;
3023 uint64_t amask, tabl, tabr;
3024 int shift, imask, omask;
3026 if (cc) {
3027 tcg_gen_mov_tl(cpu_cc_src, s1);
3028 tcg_gen_mov_tl(cpu_cc_src2, s2);
3029 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
3030 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3031 dc->cc_op = CC_OP_SUB;
3034 /* Theory of operation: there are two tables, left and right (not to
3035 be confused with the left and right versions of the opcode). These
3036 are indexed by the low 3 bits of the inputs. To make things "easy",
3037 these tables are loaded into two constants, TABL and TABR below.
3038 The operation index = (input & imask) << shift calculates the index
3039 into the constant, while val = (table >> index) & omask calculates
3040 the value we're looking for. */
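/* Worked example, width 8 with LEFT set: an input whose low three
   bits are 3 gives index = 3 << 3 = 24, and
   (0x80c0e0f0f8fcfeff >> 24) & 0xff == 0xf8, a mask selecting
   five of the eight byte lanes.  */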
3041 switch (width) {
3042 case 8:
3043 imask = 0x7;
3044 shift = 3;
3045 omask = 0xff;
3046 if (left) {
3047 tabl = 0x80c0e0f0f8fcfeffULL;
3048 tabr = 0xff7f3f1f0f070301ULL;
3049 } else {
3050 tabl = 0x0103070f1f3f7fffULL;
3051 tabr = 0xfffefcf8f0e0c080ULL;
3053 break;
3054 case 16:
3055 imask = 0x6;
3056 shift = 1;
3057 omask = 0xf;
3058 if (left) {
3059 tabl = 0x8cef;
3060 tabr = 0xf731;
3061 } else {
3062 tabl = 0x137f;
3063 tabr = 0xfec8;
3065 break;
3066 case 32:
3067 imask = 0x4;
3068 shift = 0;
3069 omask = 0x3;
3070 if (left) {
3071 tabl = (2 << 2) | 3;
3072 tabr = (3 << 2) | 1;
3073 } else {
3074 tabl = (1 << 2) | 3;
3075 tabr = (3 << 2) | 2;
3077 break;
3078 default:
3079 abort();
3082 lo1 = tcg_temp_new();
3083 lo2 = tcg_temp_new();
3084 tcg_gen_andi_tl(lo1, s1, imask);
3085 tcg_gen_andi_tl(lo2, s2, imask);
3086 tcg_gen_shli_tl(lo1, lo1, shift);
3087 tcg_gen_shli_tl(lo2, lo2, shift);
3089 t1 = tcg_const_tl(tabl);
3090 t2 = tcg_const_tl(tabr);
3091 tcg_gen_shr_tl(lo1, t1, lo1);
3092 tcg_gen_shr_tl(lo2, t2, lo2);
3093 tcg_gen_andi_tl(dst, lo1, omask);
3094 tcg_gen_andi_tl(lo2, lo2, omask);
3096 amask = -8;
3097 if (AM_CHECK(dc)) {
3098 amask &= 0xffffffffULL;
3100 tcg_gen_andi_tl(s1, s1, amask);
3101 tcg_gen_andi_tl(s2, s2, amask);
3103 /* We want to compute
3104 dst = (s1 == s2 ? lo1 : lo1 & lo2).
3105 We've already done dst = lo1, so this reduces to
3106 dst &= (s1 == s2 ? -1 : lo2)
3107 Which we perform by
3108 lo2 |= -(s1 == s2)
3109 dst &= lo2
3111 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
3112 tcg_gen_neg_tl(t1, t1);
3113 tcg_gen_or_tl(lo2, lo2, t1);
3114 tcg_gen_and_tl(dst, dst, lo2);
3116 tcg_temp_free(lo1);
3117 tcg_temp_free(lo2);
3118 tcg_temp_free(t1);
3119 tcg_temp_free(t2);
3122 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
3124 TCGv tmp = tcg_temp_new();
3126 tcg_gen_add_tl(tmp, s1, s2);
3127 tcg_gen_andi_tl(dst, tmp, -8);
3128 if (left) {
3129 tcg_gen_neg_tl(tmp, tmp);
3131 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3133 tcg_temp_free(tmp);
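/* FALIGNDATA: with off = GSR.align in [0,7], compute
   dst = (s1 << 8*off) | (s2 >> (64 - 8*off)), i.e. extract 8 bytes
   from the concatenation s1:s2 starting at byte offset off.  The
   right shift is decomposed below to avoid an out-of-range 64-bit
   shift when off == 0.  */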
3136 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
3138 TCGv t1, t2, shift;
3140 t1 = tcg_temp_new();
3141 t2 = tcg_temp_new();
3142 shift = tcg_temp_new();
3144 tcg_gen_andi_tl(shift, gsr, 7);
3145 tcg_gen_shli_tl(shift, shift, 3);
3146 tcg_gen_shl_tl(t1, s1, shift);
3148 /* A shift of 64 does not produce 0 in TCG. Divide this into a
3149 shift of (up to 63) followed by a constant shift of 1. */
3150 tcg_gen_xori_tl(shift, shift, 63);
3151 tcg_gen_shr_tl(t2, s2, shift);
3152 tcg_gen_shri_tl(t2, t2, 1);
3154 tcg_gen_or_tl(dst, t1, t2);
3156 tcg_temp_free(t1);
3157 tcg_temp_free(t2);
3158 tcg_temp_free(shift);
3160 #endif
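/* Feature gates: both macros bail out, via goto, to the illegal_insn
   and nfpu_insn handlers at the bottom of disas_sparc_insn() when the
   selected CPU model lacks the feature bit.  */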
3162 #define CHECK_IU_FEATURE(dc, FEATURE) \
3163 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3164 goto illegal_insn;
3165 #define CHECK_FPU_FEATURE(dc, FEATURE) \
3166 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3167 goto nfpu_insn;
3169 /* before an instruction, dc->pc must be static */
3170 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
3172 unsigned int opc, rs1, rs2, rd;
3173 TCGv cpu_src1, cpu_src2;
3174 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
3175 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
3176 target_long simm;
3178 opc = GET_FIELD(insn, 0, 1);
3179 rd = GET_FIELD(insn, 2, 6);
3181 switch (opc) {
3182 case 0: /* branches/sethi */
3184 unsigned int xop = GET_FIELD(insn, 7, 9);
3185 int32_t target;
3186 switch (xop) {
3187 #ifdef TARGET_SPARC64
3188 case 0x1: /* V9 BPcc */
3190 int cc;
3192 target = GET_FIELD_SP(insn, 0, 18);
3193 target = sign_extend(target, 19);
3194 target <<= 2;
3195 cc = GET_FIELD_SP(insn, 20, 21);
3196 if (cc == 0)
3197 do_branch(dc, target, insn, 0);
3198 else if (cc == 2)
3199 do_branch(dc, target, insn, 1);
3200 else
3201 goto illegal_insn;
3202 goto jmp_insn;
3204 case 0x3: /* V9 BPr */
3206 target = GET_FIELD_SP(insn, 0, 13) |
3207 (GET_FIELD_SP(insn, 20, 21) << 14);
3208 target = sign_extend(target, 16);
3209 target <<= 2;
3210 cpu_src1 = get_src1(dc, insn);
3211 do_branch_reg(dc, target, insn, cpu_src1);
3212 goto jmp_insn;
3214 case 0x5: /* V9 FBPcc */
3216 int cc = GET_FIELD_SP(insn, 20, 21);
3217 if (gen_trap_ifnofpu(dc)) {
3218 goto jmp_insn;
3220 target = GET_FIELD_SP(insn, 0, 18);
3221 target = sign_extend(target, 19);
3222 target <<= 2;
3223 do_fbranch(dc, target, insn, cc);
3224 goto jmp_insn;
3226 #else
3227 case 0x7: /* CBN+x */
3229 goto ncp_insn;
3231 #endif
3232 case 0x2: /* BN+x */
3234 target = GET_FIELD(insn, 10, 31);
3235 target = sign_extend(target, 22);
3236 target <<= 2;
3237 do_branch(dc, target, insn, 0);
3238 goto jmp_insn;
3240 case 0x6: /* FBN+x */
3242 if (gen_trap_ifnofpu(dc)) {
3243 goto jmp_insn;
3245 target = GET_FIELD(insn, 10, 31);
3246 target = sign_extend(target, 22);
3247 target <<= 2;
3248 do_fbranch(dc, target, insn, 0);
3249 goto jmp_insn;
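/* SETHI: rd = imm22 << 10.  E.g. "sethi %hi(0x80001000), %o0" writes
   0x80001000, since the low ten bits of that constant are zero.  */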
3251 case 0x4: /* SETHI */
3252 /* Special-case %g0 because that's the canonical nop. */
3253 if (rd) {
3254 uint32_t value = GET_FIELD(insn, 10, 31);
3255 TCGv t = gen_dest_gpr(dc, rd);
3256 tcg_gen_movi_tl(t, value << 10);
3257 gen_store_gpr(dc, rd, t);
3259 break;
3260 case 0x0: /* UNIMPL */
3261 default:
3262 goto illegal_insn;
3264 break;
3266 break;
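/* CALL: the 30-bit displacement is in words and PC-relative, and %o7
   (r15) receives the address of the CALL itself, so "retl" can return
   to %o7 + 8, past the delay slot.  */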
3267 case 1: /*CALL*/
3269 target_long target = GET_FIELDs(insn, 2, 31) << 2;
3270 TCGv o7 = gen_dest_gpr(dc, 15);
3272 tcg_gen_movi_tl(o7, dc->pc);
3273 gen_store_gpr(dc, 15, o7);
3274 target += dc->pc;
3275 gen_mov_pc_npc(dc);
3276 #ifdef TARGET_SPARC64
3277 if (unlikely(AM_CHECK(dc))) {
3278 target &= 0xffffffffULL;
3280 #endif
3281 dc->npc = target;
3283 goto jmp_insn;
3284 case 2: /* FPU & Logical Operations */
3286 unsigned int xop = GET_FIELD(insn, 7, 12);
3287 TCGv cpu_dst = get_temp_tl(dc);
3288 TCGv cpu_tmp0;
3290 if (xop == 0x3a) { /* generate trap */
3291 int cond = GET_FIELD(insn, 3, 6);
3292 TCGv_i32 trap;
3293 TCGLabel *l1 = NULL;
3294 int mask;
3296 if (cond == 0) {
3297 /* Trap never. */
3298 break;
3301 save_state(dc);
3303 if (cond != 8) {
3304 /* Conditional trap. */
3305 DisasCompare cmp;
3306 #ifdef TARGET_SPARC64
3307 /* V9 icc/xcc */
3308 int cc = GET_FIELD_SP(insn, 11, 12);
3309 if (cc == 0) {
3310 gen_compare(&cmp, 0, cond, dc);
3311 } else if (cc == 2) {
3312 gen_compare(&cmp, 1, cond, dc);
3313 } else {
3314 goto illegal_insn;
3316 #else
3317 gen_compare(&cmp, 0, cond, dc);
3318 #endif
3319 l1 = gen_new_label();
3320 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3321 cmp.c1, cmp.c2, l1);
3322 free_compare(&cmp);
3325 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3326 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3328 /* Don't use the normal temporaries, as they may well have
3329 gone out of scope with the branch above. While we're
3330 doing that, we might as well pre-truncate to 32 bits. */
3331 trap = tcg_temp_new_i32();
3333 rs1 = GET_FIELD_SP(insn, 14, 18);
3334 if (IS_IMM) {
3335 rs2 = GET_FIELD_SP(insn, 0, 7);
3336 if (rs1 == 0) {
3337 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3338 /* Signal that the trap value is fully constant. */
3339 mask = 0;
3340 } else {
3341 TCGv t1 = gen_load_gpr(dc, rs1);
3342 tcg_gen_trunc_tl_i32(trap, t1);
3343 tcg_gen_addi_i32(trap, trap, rs2);
3345 } else {
3346 TCGv t1, t2;
3347 rs2 = GET_FIELD_SP(insn, 0, 4);
3348 t1 = gen_load_gpr(dc, rs1);
3349 t2 = gen_load_gpr(dc, rs2);
3350 tcg_gen_add_tl(t1, t1, t2);
3351 tcg_gen_trunc_tl_i32(trap, t1);
3353 if (mask != 0) {
3354 tcg_gen_andi_i32(trap, trap, mask);
3355 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3358 gen_helper_raise_exception(cpu_env, trap);
3359 tcg_temp_free_i32(trap);
3361 if (cond == 8) {
3362 /* An unconditional trap ends the TB. */
3363 dc->is_br = 1;
3364 goto jmp_insn;
3365 } else {
3366 /* A conditional trap falls through to the next insn. */
3367 gen_set_label(l1);
3368 break;
3370 } else if (xop == 0x28) {
3371 rs1 = GET_FIELD(insn, 13, 17);
3372 switch(rs1) {
3373 case 0: /* rdy */
3374 #ifndef TARGET_SPARC64
3375 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3376 manual, rdy on the microSPARC
3377 II */
3378 case 0x0f: /* stbar in the SPARCv8 manual,
3379 rdy on the microSPARC II */
3380 case 0x10 ... 0x1f: /* implementation-dependent in the
3381 SPARCv8 manual, rdy on the
3382 microSPARC II */
3383 /* Read Asr17 */
3384 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3385 TCGv t = gen_dest_gpr(dc, rd);
3386 /* Read Asr17 for a Leon3 monoprocessor */
3387 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3388 gen_store_gpr(dc, rd, t);
3389 break;
3391 #endif
3392 gen_store_gpr(dc, rd, cpu_y);
3393 break;
3394 #ifdef TARGET_SPARC64
3395 case 0x2: /* V9 rdccr */
3396 update_psr(dc);
3397 gen_helper_rdccr(cpu_dst, cpu_env);
3398 gen_store_gpr(dc, rd, cpu_dst);
3399 break;
3400 case 0x3: /* V9 rdasi */
3401 tcg_gen_movi_tl(cpu_dst, dc->asi);
3402 gen_store_gpr(dc, rd, cpu_dst);
3403 break;
3404 case 0x4: /* V9 rdtick */
3406 TCGv_ptr r_tickptr;
3407 TCGv_i32 r_const;
3409 r_tickptr = tcg_temp_new_ptr();
3410 r_const = tcg_const_i32(dc->mem_idx);
3411 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3412 offsetof(CPUSPARCState, tick));
3413 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3414 r_const);
3415 tcg_temp_free_ptr(r_tickptr);
3416 tcg_temp_free_i32(r_const);
3417 gen_store_gpr(dc, rd, cpu_dst);
3419 break;
3420 case 0x5: /* V9 rdpc */
3422 TCGv t = gen_dest_gpr(dc, rd);
3423 if (unlikely(AM_CHECK(dc))) {
3424 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3425 } else {
3426 tcg_gen_movi_tl(t, dc->pc);
3428 gen_store_gpr(dc, rd, t);
3430 break;
3431 case 0x6: /* V9 rdfprs */
3432 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3433 gen_store_gpr(dc, rd, cpu_dst);
3434 break;
3435 case 0xf: /* V9 membar */
3436 break; /* no effect */
3437 case 0x13: /* Graphics Status */
3438 if (gen_trap_ifnofpu(dc)) {
3439 goto jmp_insn;
3441 gen_store_gpr(dc, rd, cpu_gsr);
3442 break;
3443 case 0x16: /* Softint */
3444 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3445 offsetof(CPUSPARCState, softint));
3446 gen_store_gpr(dc, rd, cpu_dst);
3447 break;
3448 case 0x17: /* Tick compare */
3449 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3450 break;
3451 case 0x18: /* System tick */
3453 TCGv_ptr r_tickptr;
3454 TCGv_i32 r_const;
3456 r_tickptr = tcg_temp_new_ptr();
3457 r_const = tcg_const_i32(dc->mem_idx);
3458 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3459 offsetof(CPUSPARCState, stick));
3460 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3461 r_const);
3462 tcg_temp_free_ptr(r_tickptr);
3463 tcg_temp_free_i32(r_const);
3464 gen_store_gpr(dc, rd, cpu_dst);
3466 break;
3467 case 0x19: /* System tick compare */
3468 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3469 break;
3470 case 0x1a: /* UltraSPARC-T1 Strand status */
3471 /* XXX The HYPV check may not be enough: UA2005 & UA2007 describe
3472 * this ASR as implementation dependent. */
3474 CHECK_IU_FEATURE(dc, HYPV);
3476 TCGv t = gen_dest_gpr(dc, rd);
3477 tcg_gen_movi_tl(t, 1UL);
3478 gen_store_gpr(dc, rd, t);
3480 break;
3481 case 0x10: /* Performance Control */
3482 case 0x11: /* Performance Instrumentation Counter */
3483 case 0x12: /* Dispatch Control */
3484 case 0x14: /* Softint set, WO */
3485 case 0x15: /* Softint clear, WO */
3486 #endif
3487 default:
3488 goto illegal_insn;
3490 #if !defined(CONFIG_USER_ONLY)
3491 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3492 #ifndef TARGET_SPARC64
3493 if (!supervisor(dc)) {
3494 goto priv_insn;
3496 update_psr(dc);
3497 gen_helper_rdpsr(cpu_dst, cpu_env);
3498 #else
3499 CHECK_IU_FEATURE(dc, HYPV);
3500 if (!hypervisor(dc))
3501 goto priv_insn;
3502 rs1 = GET_FIELD(insn, 13, 17);
3503 switch (rs1) {
3504 case 0: // hpstate
3505 tcg_gen_ld_i64(cpu_dst, cpu_env,
3506 offsetof(CPUSPARCState, hpstate));
3507 break;
3508 case 1: // htstate
3509 // gen_op_rdhtstate();
3510 break;
3511 case 3: // hintp
3512 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3513 break;
3514 case 5: // htba
3515 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3516 break;
3517 case 6: // hver
3518 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3519 break;
3520 case 31: // hstick_cmpr
3521 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3522 break;
3523 default:
3524 goto illegal_insn;
3526 #endif
3527 gen_store_gpr(dc, rd, cpu_dst);
3528 break;
3529 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3530 if (!supervisor(dc)) {
3531 goto priv_insn;
3533 cpu_tmp0 = get_temp_tl(dc);
3534 #ifdef TARGET_SPARC64
3535 rs1 = GET_FIELD(insn, 13, 17);
3536 switch (rs1) {
3537 case 0: // tpc
3539 TCGv_ptr r_tsptr;
3541 r_tsptr = tcg_temp_new_ptr();
3542 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3543 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3544 offsetof(trap_state, tpc));
3545 tcg_temp_free_ptr(r_tsptr);
3547 break;
3548 case 1: // tnpc
3550 TCGv_ptr r_tsptr;
3552 r_tsptr = tcg_temp_new_ptr();
3553 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3554 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3555 offsetof(trap_state, tnpc));
3556 tcg_temp_free_ptr(r_tsptr);
3558 break;
3559 case 2: // tstate
3561 TCGv_ptr r_tsptr;
3563 r_tsptr = tcg_temp_new_ptr();
3564 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3565 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3566 offsetof(trap_state, tstate));
3567 tcg_temp_free_ptr(r_tsptr);
3569 break;
3570 case 3: // tt
3572 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3574 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3575 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3576 offsetof(trap_state, tt));
3577 tcg_temp_free_ptr(r_tsptr);
3579 break;
3580 case 4: // tick
3582 TCGv_ptr r_tickptr;
3583 TCGv_i32 r_const;
3585 r_tickptr = tcg_temp_new_ptr();
3586 r_const = tcg_const_i32(dc->mem_idx);
3587 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3588 offsetof(CPUSPARCState, tick));
3589 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3590 r_tickptr, r_const);
3591 tcg_temp_free_ptr(r_tickptr);
3592 tcg_temp_free_i32(r_const);
3594 break;
3595 case 5: // tba
3596 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3597 break;
3598 case 6: // pstate
3599 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3600 offsetof(CPUSPARCState, pstate));
3601 break;
3602 case 7: // tl
3603 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3604 offsetof(CPUSPARCState, tl));
3605 break;
3606 case 8: // pil
3607 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3608 offsetof(CPUSPARCState, psrpil));
3609 break;
3610 case 9: // cwp
3611 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3612 break;
3613 case 10: // cansave
3614 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3615 offsetof(CPUSPARCState, cansave));
3616 break;
3617 case 11: // canrestore
3618 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3619 offsetof(CPUSPARCState, canrestore));
3620 break;
3621 case 12: // cleanwin
3622 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3623 offsetof(CPUSPARCState, cleanwin));
3624 break;
3625 case 13: // otherwin
3626 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3627 offsetof(CPUSPARCState, otherwin));
3628 break;
3629 case 14: // wstate
3630 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3631 offsetof(CPUSPARCState, wstate));
3632 break;
3633 case 16: // UA2005 gl
3634 CHECK_IU_FEATURE(dc, GL);
3635 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3636 offsetof(CPUSPARCState, gl));
3637 break;
3638 case 26: // UA2005 strand status
3639 CHECK_IU_FEATURE(dc, HYPV);
3640 if (!hypervisor(dc))
3641 goto priv_insn;
3642 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3643 break;
3644 case 31: // ver
3645 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3646 break;
3647 case 15: // fq
3648 default:
3649 goto illegal_insn;
3651 #else
3652 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3653 #endif
3654 gen_store_gpr(dc, rd, cpu_tmp0);
3655 break;
3656 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3657 #ifdef TARGET_SPARC64
3658 gen_helper_flushw(cpu_env);
3659 #else
3660 if (!supervisor(dc))
3661 goto priv_insn;
3662 gen_store_gpr(dc, rd, cpu_tbr);
3663 #endif
3664 break;
3665 #endif
3666 } else if (xop == 0x34) { /* FPU Operations */
3667 if (gen_trap_ifnofpu(dc)) {
3668 goto jmp_insn;
3670 gen_op_clear_ieee_excp_and_FTT();
3671 rs1 = GET_FIELD(insn, 13, 17);
3672 rs2 = GET_FIELD(insn, 27, 31);
3673 xop = GET_FIELD(insn, 18, 26);
3675 switch (xop) {
3676 case 0x1: /* fmovs */
3677 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3678 gen_store_fpr_F(dc, rd, cpu_src1_32);
3679 break;
3680 case 0x5: /* fnegs */
3681 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3682 break;
3683 case 0x9: /* fabss */
3684 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3685 break;
3686 case 0x29: /* fsqrts */
3687 CHECK_FPU_FEATURE(dc, FSQRT);
3688 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3689 break;
3690 case 0x2a: /* fsqrtd */
3691 CHECK_FPU_FEATURE(dc, FSQRT);
3692 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3693 break;
3694 case 0x2b: /* fsqrtq */
3695 CHECK_FPU_FEATURE(dc, FLOAT128);
3696 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3697 break;
3698 case 0x41: /* fadds */
3699 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3700 break;
3701 case 0x42: /* faddd */
3702 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3703 break;
3704 case 0x43: /* faddq */
3705 CHECK_FPU_FEATURE(dc, FLOAT128);
3706 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3707 break;
3708 case 0x45: /* fsubs */
3709 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3710 break;
3711 case 0x46: /* fsubd */
3712 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3713 break;
3714 case 0x47: /* fsubq */
3715 CHECK_FPU_FEATURE(dc, FLOAT128);
3716 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3717 break;
3718 case 0x49: /* fmuls */
3719 CHECK_FPU_FEATURE(dc, FMUL);
3720 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3721 break;
3722 case 0x4a: /* fmuld */
3723 CHECK_FPU_FEATURE(dc, FMUL);
3724 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3725 break;
3726 case 0x4b: /* fmulq */
3727 CHECK_FPU_FEATURE(dc, FLOAT128);
3728 CHECK_FPU_FEATURE(dc, FMUL);
3729 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3730 break;
3731 case 0x4d: /* fdivs */
3732 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3733 break;
3734 case 0x4e: /* fdivd */
3735 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3736 break;
3737 case 0x4f: /* fdivq */
3738 CHECK_FPU_FEATURE(dc, FLOAT128);
3739 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3740 break;
3741 case 0x69: /* fsmuld */
3742 CHECK_FPU_FEATURE(dc, FSMULD);
3743 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3744 break;
3745 case 0x6e: /* fdmulq */
3746 CHECK_FPU_FEATURE(dc, FLOAT128);
3747 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3748 break;
3749 case 0xc4: /* fitos */
3750 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3751 break;
3752 case 0xc6: /* fdtos */
3753 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3754 break;
3755 case 0xc7: /* fqtos */
3756 CHECK_FPU_FEATURE(dc, FLOAT128);
3757 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3758 break;
3759 case 0xc8: /* fitod */
3760 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3761 break;
3762 case 0xc9: /* fstod */
3763 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3764 break;
3765 case 0xcb: /* fqtod */
3766 CHECK_FPU_FEATURE(dc, FLOAT128);
3767 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3768 break;
3769 case 0xcc: /* fitoq */
3770 CHECK_FPU_FEATURE(dc, FLOAT128);
3771 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3772 break;
3773 case 0xcd: /* fstoq */
3774 CHECK_FPU_FEATURE(dc, FLOAT128);
3775 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3776 break;
3777 case 0xce: /* fdtoq */
3778 CHECK_FPU_FEATURE(dc, FLOAT128);
3779 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3780 break;
3781 case 0xd1: /* fstoi */
3782 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3783 break;
3784 case 0xd2: /* fdtoi */
3785 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3786 break;
3787 case 0xd3: /* fqtoi */
3788 CHECK_FPU_FEATURE(dc, FLOAT128);
3789 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3790 break;
3791 #ifdef TARGET_SPARC64
3792 case 0x2: /* V9 fmovd */
3793 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3794 gen_store_fpr_D(dc, rd, cpu_src1_64);
3795 break;
3796 case 0x3: /* V9 fmovq */
3797 CHECK_FPU_FEATURE(dc, FLOAT128);
3798 gen_move_Q(dc, rd, rs2);
3799 break;
3800 case 0x6: /* V9 fnegd */
3801 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3802 break;
3803 case 0x7: /* V9 fnegq */
3804 CHECK_FPU_FEATURE(dc, FLOAT128);
3805 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3806 break;
3807 case 0xa: /* V9 fabsd */
3808 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3809 break;
3810 case 0xb: /* V9 fabsq */
3811 CHECK_FPU_FEATURE(dc, FLOAT128);
3812 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3813 break;
3814 case 0x81: /* V9 fstox */
3815 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3816 break;
3817 case 0x82: /* V9 fdtox */
3818 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3819 break;
3820 case 0x83: /* V9 fqtox */
3821 CHECK_FPU_FEATURE(dc, FLOAT128);
3822 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3823 break;
3824 case 0x84: /* V9 fxtos */
3825 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3826 break;
3827 case 0x88: /* V9 fxtod */
3828 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3829 break;
3830 case 0x8c: /* V9 fxtoq */
3831 CHECK_FPU_FEATURE(dc, FLOAT128);
3832 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3833 break;
3834 #endif
3835 default:
3836 goto illegal_insn;
3838 } else if (xop == 0x35) { /* FPU Operations */
3839 #ifdef TARGET_SPARC64
3840 int cond;
3841 #endif
3842 if (gen_trap_ifnofpu(dc)) {
3843 goto jmp_insn;
3845 gen_op_clear_ieee_excp_and_FTT();
3846 rs1 = GET_FIELD(insn, 13, 17);
3847 rs2 = GET_FIELD(insn, 27, 31);
3848 xop = GET_FIELD(insn, 18, 26);
3850 #ifdef TARGET_SPARC64
3851 #define FMOVR(sz) \
3852 do { \
3853 DisasCompare cmp; \
3854 cond = GET_FIELD_SP(insn, 10, 12); \
3855 cpu_src1 = get_src1(dc, insn); \
3856 gen_compare_reg(&cmp, cond, cpu_src1); \
3857 gen_fmov##sz(dc, &cmp, rd, rs2); \
3858 free_compare(&cmp); \
3859 } while (0)
3861 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3862 FMOVR(s);
3863 break;
3864 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3865 FMOVR(d);
3866 break;
3867 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3868 CHECK_FPU_FEATURE(dc, FLOAT128);
3869 FMOVR(q);
3870 break;
3872 #undef FMOVR
3873 #endif
3874 switch (xop) {
3875 #ifdef TARGET_SPARC64
3876 #define FMOVCC(fcc, sz) \
3877 do { \
3878 DisasCompare cmp; \
3879 cond = GET_FIELD_SP(insn, 14, 17); \
3880 gen_fcompare(&cmp, fcc, cond); \
3881 gen_fmov##sz(dc, &cmp, rd, rs2); \
3882 free_compare(&cmp); \
3883 } while (0)
3885 case 0x001: /* V9 fmovscc %fcc0 */
3886 FMOVCC(0, s);
3887 break;
3888 case 0x002: /* V9 fmovdcc %fcc0 */
3889 FMOVCC(0, d);
3890 break;
3891 case 0x003: /* V9 fmovqcc %fcc0 */
3892 CHECK_FPU_FEATURE(dc, FLOAT128);
3893 FMOVCC(0, q);
3894 break;
3895 case 0x041: /* V9 fmovscc %fcc1 */
3896 FMOVCC(1, s);
3897 break;
3898 case 0x042: /* V9 fmovdcc %fcc1 */
3899 FMOVCC(1, d);
3900 break;
3901 case 0x043: /* V9 fmovqcc %fcc1 */
3902 CHECK_FPU_FEATURE(dc, FLOAT128);
3903 FMOVCC(1, q);
3904 break;
3905 case 0x081: /* V9 fmovscc %fcc2 */
3906 FMOVCC(2, s);
3907 break;
3908 case 0x082: /* V9 fmovdcc %fcc2 */
3909 FMOVCC(2, d);
3910 break;
3911 case 0x083: /* V9 fmovqcc %fcc2 */
3912 CHECK_FPU_FEATURE(dc, FLOAT128);
3913 FMOVCC(2, q);
3914 break;
3915 case 0x0c1: /* V9 fmovscc %fcc3 */
3916 FMOVCC(3, s);
3917 break;
3918 case 0x0c2: /* V9 fmovdcc %fcc3 */
3919 FMOVCC(3, d);
3920 break;
3921 case 0x0c3: /* V9 fmovqcc %fcc3 */
3922 CHECK_FPU_FEATURE(dc, FLOAT128);
3923 FMOVCC(3, q);
3924 break;
3925 #undef FMOVCC
3926 #define FMOVCC(xcc, sz) \
3927 do { \
3928 DisasCompare cmp; \
3929 cond = GET_FIELD_SP(insn, 14, 17); \
3930 gen_compare(&cmp, xcc, cond, dc); \
3931 gen_fmov##sz(dc, &cmp, rd, rs2); \
3932 free_compare(&cmp); \
3933 } while (0)
3935 case 0x101: /* V9 fmovscc %icc */
3936 FMOVCC(0, s);
3937 break;
3938 case 0x102: /* V9 fmovdcc %icc */
3939 FMOVCC(0, d);
3940 break;
3941 case 0x103: /* V9 fmovqcc %icc */
3942 CHECK_FPU_FEATURE(dc, FLOAT128);
3943 FMOVCC(0, q);
3944 break;
3945 case 0x181: /* V9 fmovscc %xcc */
3946 FMOVCC(1, s);
3947 break;
3948 case 0x182: /* V9 fmovdcc %xcc */
3949 FMOVCC(1, d);
3950 break;
3951 case 0x183: /* V9 fmovqcc %xcc */
3952 CHECK_FPU_FEATURE(dc, FLOAT128);
3953 FMOVCC(1, q);
3954 break;
3955 #undef FMOVCC
3956 #endif
3957 case 0x51: /* fcmps, V9 %fcc */
3958 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3959 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3960 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3961 break;
3962 case 0x52: /* fcmpd, V9 %fcc */
3963 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3964 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3965 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3966 break;
3967 case 0x53: /* fcmpq, V9 %fcc */
3968 CHECK_FPU_FEATURE(dc, FLOAT128);
3969 gen_op_load_fpr_QT0(QFPREG(rs1));
3970 gen_op_load_fpr_QT1(QFPREG(rs2));
3971 gen_op_fcmpq(rd & 3);
3972 break;
3973 case 0x55: /* fcmpes, V9 %fcc */
3974 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3975 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3976 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3977 break;
3978 case 0x56: /* fcmped, V9 %fcc */
3979 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3980 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3981 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3982 break;
3983 case 0x57: /* fcmpeq, V9 %fcc */
3984 CHECK_FPU_FEATURE(dc, FLOAT128);
3985 gen_op_load_fpr_QT0(QFPREG(rs1));
3986 gen_op_load_fpr_QT1(QFPREG(rs2));
3987 gen_op_fcmpeq(rd & 3);
3988 break;
3989 default:
3990 goto illegal_insn;
3992 } else if (xop == 0x2) {
3993 TCGv dst = gen_dest_gpr(dc, rd);
3994 rs1 = GET_FIELD(insn, 13, 17);
3995 if (rs1 == 0) {
3996 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3997 if (IS_IMM) { /* immediate */
3998 simm = GET_FIELDs(insn, 19, 31);
3999 tcg_gen_movi_tl(dst, simm);
4000 gen_store_gpr(dc, rd, dst);
4001 } else { /* register */
4002 rs2 = GET_FIELD(insn, 27, 31);
4003 if (rs2 == 0) {
4004 tcg_gen_movi_tl(dst, 0);
4005 gen_store_gpr(dc, rd, dst);
4006 } else {
4007 cpu_src2 = gen_load_gpr(dc, rs2);
4008 gen_store_gpr(dc, rd, cpu_src2);
4011 } else {
4012 cpu_src1 = get_src1(dc, insn);
4013 if (IS_IMM) { /* immediate */
4014 simm = GET_FIELDs(insn, 19, 31);
4015 tcg_gen_ori_tl(dst, cpu_src1, simm);
4016 gen_store_gpr(dc, rd, dst);
4017 } else { /* register */
4018 rs2 = GET_FIELD(insn, 27, 31);
4019 if (rs2 == 0) {
4020 /* mov shortcut: or x, %g0, y -> mov x, y */
4021 gen_store_gpr(dc, rd, cpu_src1);
4022 } else {
4023 cpu_src2 = gen_load_gpr(dc, rs2);
4024 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
4025 gen_store_gpr(dc, rd, dst);
4029 #ifdef TARGET_SPARC64
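/* Shifts: bit 12 of the instruction selects the 64-bit form (sllx,
   srlx, srax) with a 6-bit count; the 32-bit forms mask the count to
   5 bits and, for srl/sra, operate on the low 32 bits of rs1.  */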
4030 } else if (xop == 0x25) { /* sll, V9 sllx */
4031 cpu_src1 = get_src1(dc, insn);
4032 if (IS_IMM) { /* immediate */
4033 simm = GET_FIELDs(insn, 20, 31);
4034 if (insn & (1 << 12)) {
4035 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
4036 } else {
4037 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
4039 } else { /* register */
4040 rs2 = GET_FIELD(insn, 27, 31);
4041 cpu_src2 = gen_load_gpr(dc, rs2);
4042 cpu_tmp0 = get_temp_tl(dc);
4043 if (insn & (1 << 12)) {
4044 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4045 } else {
4046 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4048 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
4050 gen_store_gpr(dc, rd, cpu_dst);
4051 } else if (xop == 0x26) { /* srl, V9 srlx */
4052 cpu_src1 = get_src1(dc, insn);
4053 if (IS_IMM) { /* immediate */
4054 simm = GET_FIELDs(insn, 20, 31);
4055 if (insn & (1 << 12)) {
4056 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
4057 } else {
4058 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4059 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
4061 } else { /* register */
4062 rs2 = GET_FIELD(insn, 27, 31);
4063 cpu_src2 = gen_load_gpr(dc, rs2);
4064 cpu_tmp0 = get_temp_tl(dc);
4065 if (insn & (1 << 12)) {
4066 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4067 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
4068 } else {
4069 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4070 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4071 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
4074 gen_store_gpr(dc, rd, cpu_dst);
4075 } else if (xop == 0x27) { /* sra, V9 srax */
4076 cpu_src1 = get_src1(dc, insn);
4077 if (IS_IMM) { /* immediate */
4078 simm = GET_FIELDs(insn, 20, 31);
4079 if (insn & (1 << 12)) {
4080 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
4081 } else {
4082 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4083 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
4085 } else { /* register */
4086 rs2 = GET_FIELD(insn, 27, 31);
4087 cpu_src2 = gen_load_gpr(dc, rs2);
4088 cpu_tmp0 = get_temp_tl(dc);
4089 if (insn & (1 << 12)) {
4090 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4091 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
4092 } else {
4093 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4094 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4095 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
4098 gen_store_gpr(dc, rd, cpu_dst);
4099 #endif
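/* xop values below 0x20 are the basic ALU ops, decoded through
   switch (xop & ~0x10); bit 0x10 selects the condition-code-setting
   variant (addcc, andcc, ...).  */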
4100 } else if (xop < 0x36) {
4101 if (xop < 0x20) {
4102 cpu_src1 = get_src1(dc, insn);
4103 cpu_src2 = get_src2(dc, insn);
4104 switch (xop & ~0x10) {
4105 case 0x0: /* add */
4106 if (xop & 0x10) {
4107 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4108 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4109 dc->cc_op = CC_OP_ADD;
4110 } else {
4111 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4113 break;
4114 case 0x1: /* and */
4115 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
4116 if (xop & 0x10) {
4117 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4118 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4119 dc->cc_op = CC_OP_LOGIC;
4121 break;
4122 case 0x2: /* or */
4123 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
4124 if (xop & 0x10) {
4125 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4126 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4127 dc->cc_op = CC_OP_LOGIC;
4129 break;
4130 case 0x3: /* xor */
4131 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
4132 if (xop & 0x10) {
4133 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4134 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4135 dc->cc_op = CC_OP_LOGIC;
4137 break;
4138 case 0x4: /* sub */
4139 if (xop & 0x10) {
4140 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4141 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
4142 dc->cc_op = CC_OP_SUB;
4143 } else {
4144 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
4146 break;
4147 case 0x5: /* andn */
4148 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
4149 if (xop & 0x10) {
4150 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4151 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4152 dc->cc_op = CC_OP_LOGIC;
4154 break;
4155 case 0x6: /* orn */
4156 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
4157 if (xop & 0x10) {
4158 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4159 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4160 dc->cc_op = CC_OP_LOGIC;
4162 break;
4163 case 0x7: /* xorn */
4164 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
4165 if (xop & 0x10) {
4166 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4167 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4168 dc->cc_op = CC_OP_LOGIC;
4170 break;
4171 case 0x8: /* addx, V9 addc */
4172 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4173 (xop & 0x10));
4174 break;
4175 #ifdef TARGET_SPARC64
4176 case 0x9: /* V9 mulx */
4177 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4178 break;
4179 #endif
4180 case 0xa: /* umul */
4181 CHECK_IU_FEATURE(dc, MUL);
4182 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4183 if (xop & 0x10) {
4184 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4185 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4186 dc->cc_op = CC_OP_LOGIC;
4188 break;
4189 case 0xb: /* smul */
4190 CHECK_IU_FEATURE(dc, MUL);
4191 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4192 if (xop & 0x10) {
4193 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4194 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4195 dc->cc_op = CC_OP_LOGIC;
4197 break;
4198 case 0xc: /* subx, V9 subc */
4199 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4200 (xop & 0x10));
4201 break;
4202 #ifdef TARGET_SPARC64
4203 case 0xd: /* V9 udivx */
4204 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4205 break;
4206 #endif
4207 case 0xe: /* udiv */
4208 CHECK_IU_FEATURE(dc, DIV);
4209 if (xop & 0x10) {
4210 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4211 cpu_src2);
4212 dc->cc_op = CC_OP_DIV;
4213 } else {
4214 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4215 cpu_src2);
4217 break;
4218 case 0xf: /* sdiv */
4219 CHECK_IU_FEATURE(dc, DIV);
4220 if (xop & 0x10) {
4221 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4222 cpu_src2);
4223 dc->cc_op = CC_OP_DIV;
4224 } else {
4225 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4226 cpu_src2);
4228 break;
4229 default:
4230 goto illegal_insn;
4232 gen_store_gpr(dc, rd, cpu_dst);
4233 } else {
4234 cpu_src1 = get_src1(dc, insn);
4235 cpu_src2 = get_src2(dc, insn);
4236 switch (xop) {
4237 case 0x20: /* taddcc */
4238 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4239 gen_store_gpr(dc, rd, cpu_dst);
4240 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4241 dc->cc_op = CC_OP_TADD;
4242 break;
4243 case 0x21: /* tsubcc */
4244 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4245 gen_store_gpr(dc, rd, cpu_dst);
4246 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4247 dc->cc_op = CC_OP_TSUB;
4248 break;
4249 case 0x22: /* taddcctv */
4250 gen_helper_taddcctv(cpu_dst, cpu_env,
4251 cpu_src1, cpu_src2);
4252 gen_store_gpr(dc, rd, cpu_dst);
4253 dc->cc_op = CC_OP_TADDTV;
4254 break;
4255 case 0x23: /* tsubcctv */
4256 gen_helper_tsubcctv(cpu_dst, cpu_env,
4257 cpu_src1, cpu_src2);
4258 gen_store_gpr(dc, rd, cpu_dst);
4259 dc->cc_op = CC_OP_TSUBTV;
4260 break;
4261 case 0x24: /* mulscc */
4262 update_psr(dc);
4263 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4264 gen_store_gpr(dc, rd, cpu_dst);
4265 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4266 dc->cc_op = CC_OP_ADD;
4267 break;
4268 #ifndef TARGET_SPARC64
4269 case 0x25: /* sll */
4270 if (IS_IMM) { /* immediate */
4271 simm = GET_FIELDs(insn, 20, 31);
4272 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4273 } else { /* register */
4274 cpu_tmp0 = get_temp_tl(dc);
4275 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4276 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4278 gen_store_gpr(dc, rd, cpu_dst);
4279 break;
4280 case 0x26: /* srl */
4281 if (IS_IMM) { /* immediate */
4282 simm = GET_FIELDs(insn, 20, 31);
4283 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4284 } else { /* register */
4285 cpu_tmp0 = get_temp_tl(dc);
4286 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4287 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4289 gen_store_gpr(dc, rd, cpu_dst);
4290 break;
4291 case 0x27: /* sra */
4292 if (IS_IMM) { /* immediate */
4293 simm = GET_FIELDs(insn, 20, 31);
4294 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4295 } else { /* register */
4296 cpu_tmp0 = get_temp_tl(dc);
4297 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4298 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4300 gen_store_gpr(dc, rd, cpu_dst);
4301 break;
4302 #endif
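/* WRY/WRASR and the V9 write-state-register ops: in all cases the
   value written is rs1 XOR rs2 (or rs1 XOR simm13), per the
   architectural definition, hence the tcg_gen_xor_tl() calls
   below.  */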
4303 case 0x30:
4305 cpu_tmp0 = get_temp_tl(dc);
4306 switch(rd) {
4307 case 0: /* wry */
4308 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4309 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4310 break;
4311 #ifndef TARGET_SPARC64
4312 case 0x01 ... 0x0f: /* undefined in the
4313 SPARCv8 manual, nop
4314 on the microSPARC
4315 II */
4316 case 0x10 ... 0x1f: /* implementation-dependent
4317 in the SPARCv8
4318 manual, nop on the
4319 microSPARC II */
4320 if ((rd == 0x13) && (dc->def->features &
4321 CPU_FEATURE_POWERDOWN)) {
4322 /* LEON3 power-down */
4323 save_state(dc);
4324 gen_helper_power_down(cpu_env);
4326 break;
4327 #else
4328 case 0x2: /* V9 wrccr */
4329 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4330 gen_helper_wrccr(cpu_env, cpu_tmp0);
4331 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4332 dc->cc_op = CC_OP_FLAGS;
4333 break;
4334 case 0x3: /* V9 wrasi */
4335 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4336 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4337 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4338 offsetof(CPUSPARCState, asi));
4339 /* End TB to notice changed ASI. */
4340 save_state(dc);
4341 gen_op_next_insn();
4342 tcg_gen_exit_tb(0);
4343 dc->is_br = 1;
4344 break;
4345 case 0x6: /* V9 wrfprs */
4346 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4347 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4348 dc->fprs_dirty = 0;
4349 save_state(dc);
4350 gen_op_next_insn();
4351 tcg_gen_exit_tb(0);
4352 dc->is_br = 1;
4353 break;
4354 case 0xf: /* V9 sir, nop if user */
4355 #if !defined(CONFIG_USER_ONLY)
4356 if (supervisor(dc)) {
4357 ; // XXX
4359 #endif
4360 break;
4361 case 0x13: /* Graphics Status */
4362 if (gen_trap_ifnofpu(dc)) {
4363 goto jmp_insn;
4365 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4366 break;
4367 case 0x14: /* Softint set */
4368 if (!supervisor(dc))
4369 goto illegal_insn;
4370 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4371 gen_helper_set_softint(cpu_env, cpu_tmp0);
4372 break;
4373 case 0x15: /* Softint clear */
4374 if (!supervisor(dc))
4375 goto illegal_insn;
4376 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4377 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4378 break;
4379 case 0x16: /* Softint write */
4380 if (!supervisor(dc))
4381 goto illegal_insn;
4382 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4383 gen_helper_write_softint(cpu_env, cpu_tmp0);
4384 break;
4385 case 0x17: /* Tick compare */
4386 #if !defined(CONFIG_USER_ONLY)
4387 if (!supervisor(dc))
4388 goto illegal_insn;
4389 #endif
4391 TCGv_ptr r_tickptr;
4393 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4394 cpu_src2);
4395 r_tickptr = tcg_temp_new_ptr();
4396 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4397 offsetof(CPUSPARCState, tick));
4398 gen_helper_tick_set_limit(r_tickptr,
4399 cpu_tick_cmpr);
4400 tcg_temp_free_ptr(r_tickptr);
4402 break;
4403 case 0x18: /* System tick */
4404 #if !defined(CONFIG_USER_ONLY)
4405 if (!supervisor(dc))
4406 goto illegal_insn;
4407 #endif
4409 TCGv_ptr r_tickptr;
4411 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4412 cpu_src2);
4413 r_tickptr = tcg_temp_new_ptr();
4414 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4415 offsetof(CPUSPARCState, stick));
4416 gen_helper_tick_set_count(r_tickptr,
4417 cpu_tmp0);
4418 tcg_temp_free_ptr(r_tickptr);
4420 break;
4421 case 0x19: /* System tick compare */
4422 #if !defined(CONFIG_USER_ONLY)
4423 if (!supervisor(dc))
4424 goto illegal_insn;
4425 #endif
4427 TCGv_ptr r_tickptr;
4429 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4430 cpu_src2);
4431 r_tickptr = tcg_temp_new_ptr();
4432 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4433 offsetof(CPUSPARCState, stick));
4434 gen_helper_tick_set_limit(r_tickptr,
4435 cpu_stick_cmpr);
4436 tcg_temp_free_ptr(r_tickptr);
4438 break;
4440 case 0x10: /* Performance Control */
4441 case 0x11: /* Performance Instrumentation
4442 Counter */
4443 case 0x12: /* Dispatch Control */
4444 #endif
4445 default:
4446 goto illegal_insn;
4449 break;
4450 #if !defined(CONFIG_USER_ONLY)
4451 case 0x31: /* wrpsr, V9 saved, restored */
4453 if (!supervisor(dc))
4454 goto priv_insn;
4455 #ifdef TARGET_SPARC64
4456 switch (rd) {
4457 case 0:
4458 gen_helper_saved(cpu_env);
4459 break;
4460 case 1:
4461 gen_helper_restored(cpu_env);
4462 break;
4463 case 2: /* UA2005 allclean */
4464 case 3: /* UA2005 otherw */
4465 case 4: /* UA2005 normalw */
4466 case 5: /* UA2005 invalw */
4467 // XXX
4468 default:
4469 goto illegal_insn;
4470 }
4471 #else
4472 cpu_tmp0 = get_temp_tl(dc);
4473 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4474 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4475 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4476 dc->cc_op = CC_OP_FLAGS;
4477 save_state(dc);
4478 gen_op_next_insn();
4479 tcg_gen_exit_tb(0);
4480 dc->is_br = 1;
4481 #endif
4482 }
4483 break;
4484 case 0x32: /* wrwim, V9 wrpr */
4485 {
4486 if (!supervisor(dc))
4487 goto priv_insn;
4488 cpu_tmp0 = get_temp_tl(dc);
4489 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4490 #ifdef TARGET_SPARC64
4491 switch (rd) {
4492 case 0: // tpc
4493 {
4494 TCGv_ptr r_tsptr;
4496 r_tsptr = tcg_temp_new_ptr();
4497 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4498 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4499 offsetof(trap_state, tpc));
4500 tcg_temp_free_ptr(r_tsptr);
4501 }
4502 break;
4503 case 1: // tnpc
4504 {
4505 TCGv_ptr r_tsptr;
4507 r_tsptr = tcg_temp_new_ptr();
4508 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4509 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4510 offsetof(trap_state, tnpc));
4511 tcg_temp_free_ptr(r_tsptr);
4512 }
4513 break;
4514 case 2: // tstate
4515 {
4516 TCGv_ptr r_tsptr;
4518 r_tsptr = tcg_temp_new_ptr();
4519 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4520 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4521 offsetof(trap_state,
4522 tstate));
4523 tcg_temp_free_ptr(r_tsptr);
4524 }
4525 break;
4526 case 3: // tt
4527 {
4528 TCGv_ptr r_tsptr;
4530 r_tsptr = tcg_temp_new_ptr();
4531 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4532 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4533 offsetof(trap_state, tt));
4534 tcg_temp_free_ptr(r_tsptr);
4535 }
4536 break;
4537 case 4: // tick
4538 {
4539 TCGv_ptr r_tickptr;
4541 r_tickptr = tcg_temp_new_ptr();
4542 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4543 offsetof(CPUSPARCState, tick));
4544 gen_helper_tick_set_count(r_tickptr,
4545 cpu_tmp0);
4546 tcg_temp_free_ptr(r_tickptr);
4547 }
4548 break;
4549 case 5: // tba
4550 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4551 break;
4552 case 6: // pstate
4553 save_state(dc);
4554 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4555 dc->npc = DYNAMIC_PC;
4556 break;
4557 case 7: // tl
4558 save_state(dc);
4559 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4560 offsetof(CPUSPARCState, tl));
4561 dc->npc = DYNAMIC_PC;
4562 break;
4563 case 8: // pil
4564 gen_helper_wrpil(cpu_env, cpu_tmp0);
4565 break;
4566 case 9: // cwp
4567 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4568 break;
4569 case 10: // cansave
4570 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4571 offsetof(CPUSPARCState,
4572 cansave));
4573 break;
4574 case 11: // canrestore
4575 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4576 offsetof(CPUSPARCState,
4577 canrestore));
4578 break;
4579 case 12: // cleanwin
4580 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4581 offsetof(CPUSPARCState,
4582 cleanwin));
4583 break;
4584 case 13: // otherwin
4585 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4586 offsetof(CPUSPARCState,
4587 otherwin));
4588 break;
4589 case 14: // wstate
4590 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4591 offsetof(CPUSPARCState,
4592 wstate));
4593 break;
4594 case 16: // UA2005 gl
4595 CHECK_IU_FEATURE(dc, GL);
4596 gen_helper_wrgl(cpu_env, cpu_tmp0);
4597 break;
4598 case 26: // UA2005 strand status
4599 CHECK_IU_FEATURE(dc, HYPV);
4600 if (!hypervisor(dc))
4601 goto priv_insn;
4602 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4603 break;
4604 default:
4605 goto illegal_insn;
4606 }
4607 #else
4608 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4609 if (dc->def->nwindows != 32) {
4610 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4611 (1 << dc->def->nwindows) - 1);
4612 }
4613 #endif
4614 }
4615 break;
4616 case 0x33: /* wrtbr, UA2005 wrhpr */
4617 {
4618 #ifndef TARGET_SPARC64
4619 if (!supervisor(dc))
4620 goto priv_insn;
4621 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4622 #else
4623 CHECK_IU_FEATURE(dc, HYPV);
4624 if (!hypervisor(dc))
4625 goto priv_insn;
4626 cpu_tmp0 = get_temp_tl(dc);
4627 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4628 switch (rd) {
4629 case 0: // hpstate
4630 tcg_gen_st_i64(cpu_tmp0, cpu_env,
4631 offsetof(CPUSPARCState,
4632 hpstate));
4633 save_state(dc);
4634 gen_op_next_insn();
4635 tcg_gen_exit_tb(0);
4636 dc->is_br = 1;
4637 break;
4638 case 1: // htstate
4639 // XXX gen_op_wrhtstate();
4640 break;
4641 case 3: // hintp
4642 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4643 break;
4644 case 5: // htba
4645 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4646 break;
4647 case 31: // hstick_cmpr
4648 {
4649 TCGv_ptr r_tickptr;
4651 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4652 r_tickptr = tcg_temp_new_ptr();
4653 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4654 offsetof(CPUSPARCState, hstick));
4655 gen_helper_tick_set_limit(r_tickptr,
4656 cpu_hstick_cmpr);
4657 tcg_temp_free_ptr(r_tickptr);
4658 }
4659 break;
4660 case 6: // hver readonly
4661 default:
4662 goto illegal_insn;
4663 }
4664 #endif
4665 }
4666 break;
4667 #endif
4668 #ifdef TARGET_SPARC64
4669 case 0x2c: /* V9 movcc */
4670 {
4671 int cc = GET_FIELD_SP(insn, 11, 12);
4672 int cond = GET_FIELD_SP(insn, 14, 17);
4673 DisasCompare cmp;
4674 TCGv dst;
4676 if (insn & (1 << 18)) {
4677 if (cc == 0) {
4678 gen_compare(&cmp, 0, cond, dc);
4679 } else if (cc == 2) {
4680 gen_compare(&cmp, 1, cond, dc);
4681 } else {
4682 goto illegal_insn;
4683 }
4684 } else {
4685 gen_fcompare(&cmp, cc, cond);
4686 }
4688 /* The get_src2 above loaded the normal 13-bit
4689 immediate field, not the 11-bit field we have
4690 in movcc. But it did handle the reg case. */
4691 if (IS_IMM) {
4692 simm = GET_FIELD_SPs(insn, 0, 10);
4693 tcg_gen_movi_tl(cpu_src2, simm);
4694 }
4696 dst = gen_load_gpr(dc, rd);
4697 tcg_gen_movcond_tl(cmp.cond, dst,
4698 cmp.c1, cmp.c2,
4699 cpu_src2, dst);
4700 free_compare(&cmp);
4701 gen_store_gpr(dc, rd, dst);
4702 break;
4703 }
4704 case 0x2d: /* V9 sdivx */
4705 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4706 gen_store_gpr(dc, rd, cpu_dst);
4707 break;
4708 case 0x2e: /* V9 popc */
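/* popc counts the set bits of the second source only; TCG's ctpop
   op implements it directly. */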
4709 tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
4710 gen_store_gpr(dc, rd, cpu_dst);
4711 break;
4712 case 0x2f: /* V9 movr */
4713 {
4714 int cond = GET_FIELD_SP(insn, 10, 12);
4715 DisasCompare cmp;
4716 TCGv dst;
4718 gen_compare_reg(&cmp, cond, cpu_src1);
4720 /* The get_src2 above loaded the normal 13-bit
4721 immediate field, not the 10-bit field we have
4722 in movr. But it did handle the reg case. */
4723 if (IS_IMM) {
4724 simm = GET_FIELD_SPs(insn, 0, 9);
4725 tcg_gen_movi_tl(cpu_src2, simm);
4726 }
4728 dst = gen_load_gpr(dc, rd);
4729 tcg_gen_movcond_tl(cmp.cond, dst,
4730 cmp.c1, cmp.c2,
4731 cpu_src2, dst);
4732 free_compare(&cmp);
4733 gen_store_gpr(dc, rd, dst);
4734 break;
4735 }
4736 #endif
4737 default:
4738 goto illegal_insn;
4739 }
4740 }
4741 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4742 #ifdef TARGET_SPARC64
4743 int opf = GET_FIELD_SP(insn, 5, 13);
4744 rs1 = GET_FIELD(insn, 13, 17);
4745 rs2 = GET_FIELD(insn, 27, 31);
4746 if (gen_trap_ifnofpu(dc)) {
4747 goto jmp_insn;
4748 }
4750 switch (opf) {
4751 case 0x000: /* VIS I edge8cc */
4752 CHECK_FPU_FEATURE(dc, VIS1);
4753 cpu_src1 = gen_load_gpr(dc, rs1);
4754 cpu_src2 = gen_load_gpr(dc, rs2);
4755 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4756 gen_store_gpr(dc, rd, cpu_dst);
4757 break;
4758 case 0x001: /* VIS II edge8n */
4759 CHECK_FPU_FEATURE(dc, VIS2);
4760 cpu_src1 = gen_load_gpr(dc, rs1);
4761 cpu_src2 = gen_load_gpr(dc, rs2);
4762 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4763 gen_store_gpr(dc, rd, cpu_dst);
4764 break;
4765 case 0x002: /* VIS I edge8lcc */
4766 CHECK_FPU_FEATURE(dc, VIS1);
4767 cpu_src1 = gen_load_gpr(dc, rs1);
4768 cpu_src2 = gen_load_gpr(dc, rs2);
4769 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4770 gen_store_gpr(dc, rd, cpu_dst);
4771 break;
4772 case 0x003: /* VIS II edge8ln */
4773 CHECK_FPU_FEATURE(dc, VIS2);
4774 cpu_src1 = gen_load_gpr(dc, rs1);
4775 cpu_src2 = gen_load_gpr(dc, rs2);
4776 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4777 gen_store_gpr(dc, rd, cpu_dst);
4778 break;
4779 case 0x004: /* VIS I edge16cc */
4780 CHECK_FPU_FEATURE(dc, VIS1);
4781 cpu_src1 = gen_load_gpr(dc, rs1);
4782 cpu_src2 = gen_load_gpr(dc, rs2);
4783 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4784 gen_store_gpr(dc, rd, cpu_dst);
4785 break;
4786 case 0x005: /* VIS II edge16n */
4787 CHECK_FPU_FEATURE(dc, VIS2);
4788 cpu_src1 = gen_load_gpr(dc, rs1);
4789 cpu_src2 = gen_load_gpr(dc, rs2);
4790 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4791 gen_store_gpr(dc, rd, cpu_dst);
4792 break;
4793 case 0x006: /* VIS I edge16lcc */
4794 CHECK_FPU_FEATURE(dc, VIS1);
4795 cpu_src1 = gen_load_gpr(dc, rs1);
4796 cpu_src2 = gen_load_gpr(dc, rs2);
4797 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4798 gen_store_gpr(dc, rd, cpu_dst);
4799 break;
4800 case 0x007: /* VIS II edge16ln */
4801 CHECK_FPU_FEATURE(dc, VIS2);
4802 cpu_src1 = gen_load_gpr(dc, rs1);
4803 cpu_src2 = gen_load_gpr(dc, rs2);
4804 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4805 gen_store_gpr(dc, rd, cpu_dst);
4806 break;
4807 case 0x008: /* VIS I edge32cc */
4808 CHECK_FPU_FEATURE(dc, VIS1);
4809 cpu_src1 = gen_load_gpr(dc, rs1);
4810 cpu_src2 = gen_load_gpr(dc, rs2);
4811 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4812 gen_store_gpr(dc, rd, cpu_dst);
4813 break;
4814 case 0x009: /* VIS II edge32n */
4815 CHECK_FPU_FEATURE(dc, VIS2);
4816 cpu_src1 = gen_load_gpr(dc, rs1);
4817 cpu_src2 = gen_load_gpr(dc, rs2);
4818 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4819 gen_store_gpr(dc, rd, cpu_dst);
4820 break;
4821 case 0x00a: /* VIS I edge32lcc */
4822 CHECK_FPU_FEATURE(dc, VIS1);
4823 cpu_src1 = gen_load_gpr(dc, rs1);
4824 cpu_src2 = gen_load_gpr(dc, rs2);
4825 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4826 gen_store_gpr(dc, rd, cpu_dst);
4827 break;
4828 case 0x00b: /* VIS II edge32ln */
4829 CHECK_FPU_FEATURE(dc, VIS2);
4830 cpu_src1 = gen_load_gpr(dc, rs1);
4831 cpu_src2 = gen_load_gpr(dc, rs2);
4832 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4833 gen_store_gpr(dc, rd, cpu_dst);
4834 break;
4835 case 0x010: /* VIS I array8 */
4836 CHECK_FPU_FEATURE(dc, VIS1);
4837 cpu_src1 = gen_load_gpr(dc, rs1);
4838 cpu_src2 = gen_load_gpr(dc, rs2);
4839 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4840 gen_store_gpr(dc, rd, cpu_dst);
4841 break;
4842 case 0x012: /* VIS I array16 */
4843 CHECK_FPU_FEATURE(dc, VIS1);
4844 cpu_src1 = gen_load_gpr(dc, rs1);
4845 cpu_src2 = gen_load_gpr(dc, rs2);
4846 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4847 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4848 gen_store_gpr(dc, rd, cpu_dst);
4849 break;
4850 case 0x014: /* VIS I array32 */
4851 CHECK_FPU_FEATURE(dc, VIS1);
4852 cpu_src1 = gen_load_gpr(dc, rs1);
4853 cpu_src2 = gen_load_gpr(dc, rs2);
4854 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4855 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4856 gen_store_gpr(dc, rd, cpu_dst);
4857 break;
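/* array16/array32 above reuse the array8 helper and simply scale the
   computed index by the element size (<< 1 and << 2 respectively). */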
4858 case 0x018: /* VIS I alignaddr */
4859 CHECK_FPU_FEATURE(dc, VIS1);
4860 cpu_src1 = gen_load_gpr(dc, rs1);
4861 cpu_src2 = gen_load_gpr(dc, rs2);
4862 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4863 gen_store_gpr(dc, rd, cpu_dst);
4864 break;
4865 case 0x01a: /* VIS I alignaddrl */
4866 CHECK_FPU_FEATURE(dc, VIS1);
4867 cpu_src1 = gen_load_gpr(dc, rs1);
4868 cpu_src2 = gen_load_gpr(dc, rs2);
4869 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4870 gen_store_gpr(dc, rd, cpu_dst);
4871 break;
4872 case 0x019: /* VIS II bmask */
4873 CHECK_FPU_FEATURE(dc, VIS2);
4874 cpu_src1 = gen_load_gpr(dc, rs1);
4875 cpu_src2 = gen_load_gpr(dc, rs2);
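/* bmask writes src1 + src2 to rd and latches the same value into GSR
   bits 63:32, where bshuffle (opf 0x04c below) reads it back as its
   byte-selector mask. */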
4876 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4877 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4878 gen_store_gpr(dc, rd, cpu_dst);
4879 break;
4880 case 0x020: /* VIS I fcmple16 */
4881 CHECK_FPU_FEATURE(dc, VIS1);
4882 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4883 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4884 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4885 gen_store_gpr(dc, rd, cpu_dst);
4886 break;
4887 case 0x022: /* VIS I fcmpne16 */
4888 CHECK_FPU_FEATURE(dc, VIS1);
4889 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4890 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4891 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4892 gen_store_gpr(dc, rd, cpu_dst);
4893 break;
4894 case 0x024: /* VIS I fcmple32 */
4895 CHECK_FPU_FEATURE(dc, VIS1);
4896 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4897 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4898 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4899 gen_store_gpr(dc, rd, cpu_dst);
4900 break;
4901 case 0x026: /* VIS I fcmpne32 */
4902 CHECK_FPU_FEATURE(dc, VIS1);
4903 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4904 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4905 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4906 gen_store_gpr(dc, rd, cpu_dst);
4907 break;
4908 case 0x028: /* VIS I fcmpgt16 */
4909 CHECK_FPU_FEATURE(dc, VIS1);
4910 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4911 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4912 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4913 gen_store_gpr(dc, rd, cpu_dst);
4914 break;
4915 case 0x02a: /* VIS I fcmpeq16 */
4916 CHECK_FPU_FEATURE(dc, VIS1);
4917 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4918 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4919 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4920 gen_store_gpr(dc, rd, cpu_dst);
4921 break;
4922 case 0x02c: /* VIS I fcmpgt32 */
4923 CHECK_FPU_FEATURE(dc, VIS1);
4924 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4925 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4926 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4927 gen_store_gpr(dc, rd, cpu_dst);
4928 break;
4929 case 0x02e: /* VIS I fcmpeq32 */
4930 CHECK_FPU_FEATURE(dc, VIS1);
4931 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4932 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4933 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4934 gen_store_gpr(dc, rd, cpu_dst);
4935 break;
4936 case 0x031: /* VIS I fmul8x16 */
4937 CHECK_FPU_FEATURE(dc, VIS1);
4938 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4939 break;
4940 case 0x033: /* VIS I fmul8x16au */
4941 CHECK_FPU_FEATURE(dc, VIS1);
4942 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4943 break;
4944 case 0x035: /* VIS I fmul8x16al */
4945 CHECK_FPU_FEATURE(dc, VIS1);
4946 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4947 break;
4948 case 0x036: /* VIS I fmul8sux16 */
4949 CHECK_FPU_FEATURE(dc, VIS1);
4950 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4951 break;
4952 case 0x037: /* VIS I fmul8ulx16 */
4953 CHECK_FPU_FEATURE(dc, VIS1);
4954 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4955 break;
4956 case 0x038: /* VIS I fmuld8sux16 */
4957 CHECK_FPU_FEATURE(dc, VIS1);
4958 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4959 break;
4960 case 0x039: /* VIS I fmuld8ulx16 */
4961 CHECK_FPU_FEATURE(dc, VIS1);
4962 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4963 break;
4964 case 0x03a: /* VIS I fpack32 */
4965 CHECK_FPU_FEATURE(dc, VIS1);
4966 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4967 break;
4968 case 0x03b: /* VIS I fpack16 */
4969 CHECK_FPU_FEATURE(dc, VIS1);
4970 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4971 cpu_dst_32 = gen_dest_fpr_F(dc);
4972 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4973 gen_store_fpr_F(dc, rd, cpu_dst_32);
4974 break;
4975 case 0x03d: /* VIS I fpackfix */
4976 CHECK_FPU_FEATURE(dc, VIS1);
4977 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4978 cpu_dst_32 = gen_dest_fpr_F(dc);
4979 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4980 gen_store_fpr_F(dc, rd, cpu_dst_32);
4981 break;
4982 case 0x03e: /* VIS I pdist */
4983 CHECK_FPU_FEATURE(dc, VIS1);
4984 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4985 break;
4986 case 0x048: /* VIS I faligndata */
4987 CHECK_FPU_FEATURE(dc, VIS1);
4988 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4989 break;
4990 case 0x04b: /* VIS I fpmerge */
4991 CHECK_FPU_FEATURE(dc, VIS1);
4992 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4993 break;
4994 case 0x04c: /* VIS II bshuffle */
4995 CHECK_FPU_FEATURE(dc, VIS2);
4996 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4997 break;
4998 case 0x04d: /* VIS I fexpand */
4999 CHECK_FPU_FEATURE(dc, VIS1);
5000 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
5001 break;
5002 case 0x050: /* VIS I fpadd16 */
5003 CHECK_FPU_FEATURE(dc, VIS1);
5004 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
5005 break;
5006 case 0x051: /* VIS I fpadd16s */
5007 CHECK_FPU_FEATURE(dc, VIS1);
5008 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
5009 break;
5010 case 0x052: /* VIS I fpadd32 */
5011 CHECK_FPU_FEATURE(dc, VIS1);
5012 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
5013 break;
5014 case 0x053: /* VIS I fpadd32s */
5015 CHECK_FPU_FEATURE(dc, VIS1);
5016 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
5017 break;
5018 case 0x054: /* VIS I fpsub16 */
5019 CHECK_FPU_FEATURE(dc, VIS1);
5020 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
5021 break;
5022 case 0x055: /* VIS I fpsub16s */
5023 CHECK_FPU_FEATURE(dc, VIS1);
5024 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
5025 break;
5026 case 0x056: /* VIS I fpsub32 */
5027 CHECK_FPU_FEATURE(dc, VIS1);
5028 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
5029 break;
5030 case 0x057: /* VIS I fpsub32s */
5031 CHECK_FPU_FEATURE(dc, VIS1);
5032 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
5033 break;
5034 case 0x060: /* VIS I fzero */
5035 CHECK_FPU_FEATURE(dc, VIS1);
5036 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5037 tcg_gen_movi_i64(cpu_dst_64, 0);
5038 gen_store_fpr_D(dc, rd, cpu_dst_64);
5039 break;
5040 case 0x061: /* VIS I fzeros */
5041 CHECK_FPU_FEATURE(dc, VIS1);
5042 cpu_dst_32 = gen_dest_fpr_F(dc);
5043 tcg_gen_movi_i32(cpu_dst_32, 0);
5044 gen_store_fpr_F(dc, rd, cpu_dst_32);
5045 break;
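/* The VIS logical ops below map one-to-one onto TCG ops: the _i64
   forms for the double-width registers, the _i32 forms for the
   single-precision 's' variants. */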
5046 case 0x062: /* VIS I fnor */
5047 CHECK_FPU_FEATURE(dc, VIS1);
5048 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
5049 break;
5050 case 0x063: /* VIS I fnors */
5051 CHECK_FPU_FEATURE(dc, VIS1);
5052 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
5053 break;
5054 case 0x064: /* VIS I fandnot2 */
5055 CHECK_FPU_FEATURE(dc, VIS1);
5056 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
5057 break;
5058 case 0x065: /* VIS I fandnot2s */
5059 CHECK_FPU_FEATURE(dc, VIS1);
5060 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5061 break;
5062 case 0x066: /* VIS I fnot2 */
5063 CHECK_FPU_FEATURE(dc, VIS1);
5064 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5065 break;
5066 case 0x067: /* VIS I fnot2s */
5067 CHECK_FPU_FEATURE(dc, VIS1);
5068 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5069 break;
5070 case 0x068: /* VIS I fandnot1 */
5071 CHECK_FPU_FEATURE(dc, VIS1);
5072 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5073 break;
5074 case 0x069: /* VIS I fandnot1s */
5075 CHECK_FPU_FEATURE(dc, VIS1);
5076 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5077 break;
5078 case 0x06a: /* VIS I fnot1 */
5079 CHECK_FPU_FEATURE(dc, VIS1);
5080 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5081 break;
5082 case 0x06b: /* VIS I fnot1s */
5083 CHECK_FPU_FEATURE(dc, VIS1);
5084 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5085 break;
5086 case 0x06c: /* VIS I fxor */
5087 CHECK_FPU_FEATURE(dc, VIS1);
5088 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5089 break;
5090 case 0x06d: /* VIS I fxors */
5091 CHECK_FPU_FEATURE(dc, VIS1);
5092 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5093 break;
5094 case 0x06e: /* VIS I fnand */
5095 CHECK_FPU_FEATURE(dc, VIS1);
5096 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5097 break;
5098 case 0x06f: /* VIS I fnands */
5099 CHECK_FPU_FEATURE(dc, VIS1);
5100 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5101 break;
5102 case 0x070: /* VIS I fand */
5103 CHECK_FPU_FEATURE(dc, VIS1);
5104 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5105 break;
5106 case 0x071: /* VIS I fands */
5107 CHECK_FPU_FEATURE(dc, VIS1);
5108 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5109 break;
5110 case 0x072: /* VIS I fxnor */
5111 CHECK_FPU_FEATURE(dc, VIS1);
5112 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5113 break;
5114 case 0x073: /* VIS I fxnors */
5115 CHECK_FPU_FEATURE(dc, VIS1);
5116 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5117 break;
5118 case 0x074: /* VIS I fsrc1 */
5119 CHECK_FPU_FEATURE(dc, VIS1);
5120 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5121 gen_store_fpr_D(dc, rd, cpu_src1_64);
5122 break;
5123 case 0x075: /* VIS I fsrc1s */
5124 CHECK_FPU_FEATURE(dc, VIS1);
5125 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5126 gen_store_fpr_F(dc, rd, cpu_src1_32);
5127 break;
5128 case 0x076: /* VIS I fornot2 */
5129 CHECK_FPU_FEATURE(dc, VIS1);
5130 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5131 break;
5132 case 0x077: /* VIS I fornot2s */
5133 CHECK_FPU_FEATURE(dc, VIS1);
5134 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5135 break;
5136 case 0x078: /* VIS I fsrc2 */
5137 CHECK_FPU_FEATURE(dc, VIS1);
5138 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5139 gen_store_fpr_D(dc, rd, cpu_src1_64);
5140 break;
5141 case 0x079: /* VIS I fsrc2s */
5142 CHECK_FPU_FEATURE(dc, VIS1);
5143 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5144 gen_store_fpr_F(dc, rd, cpu_src1_32);
5145 break;
5146 case 0x07a: /* VIS I fornot1 */
5147 CHECK_FPU_FEATURE(dc, VIS1);
5148 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5149 break;
5150 case 0x07b: /* VIS I fornot1s */
5151 CHECK_FPU_FEATURE(dc, VIS1);
5152 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5153 break;
5154 case 0x07c: /* VIS I for */
5155 CHECK_FPU_FEATURE(dc, VIS1);
5156 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5157 break;
5158 case 0x07d: /* VIS I fors */
5159 CHECK_FPU_FEATURE(dc, VIS1);
5160 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5161 break;
5162 case 0x07e: /* VIS I fone */
5163 CHECK_FPU_FEATURE(dc, VIS1);
5164 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5165 tcg_gen_movi_i64(cpu_dst_64, -1);
5166 gen_store_fpr_D(dc, rd, cpu_dst_64);
5167 break;
5168 case 0x07f: /* VIS I fones */
5169 CHECK_FPU_FEATURE(dc, VIS1);
5170 cpu_dst_32 = gen_dest_fpr_F(dc);
5171 tcg_gen_movi_i32(cpu_dst_32, -1);
5172 gen_store_fpr_F(dc, rd, cpu_dst_32);
5173 break;
5174 case 0x080: /* VIS I shutdown */
5175 case 0x081: /* VIS II siam */
5176 // XXX
5177 goto illegal_insn;
5178 default:
5179 goto illegal_insn;
5180 }
5181 #else
5182 goto ncp_insn;
5183 #endif
5184 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5185 #ifdef TARGET_SPARC64
5186 goto illegal_insn;
5187 #else
5188 goto ncp_insn;
5189 #endif
5190 #ifdef TARGET_SPARC64
5191 } else if (xop == 0x39) { /* V9 return */
5192 save_state(dc);
5193 cpu_src1 = get_src1(dc, insn);
5194 cpu_tmp0 = get_temp_tl(dc);
5195 if (IS_IMM) { /* immediate */
5196 simm = GET_FIELDs(insn, 19, 31);
5197 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5198 } else { /* register */
5199 rs2 = GET_FIELD(insn, 27, 31);
5200 if (rs2) {
5201 cpu_src2 = gen_load_gpr(dc, rs2);
5202 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5203 } else {
5204 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5205 }
5206 }
5207 gen_helper_restore(cpu_env);
5208 gen_mov_pc_npc(dc);
5209 gen_check_align(cpu_tmp0, 3);
5210 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5211 dc->npc = DYNAMIC_PC;
5212 goto jmp_insn;
5213 #endif
5214 } else {
5215 cpu_src1 = get_src1(dc, insn);
5216 cpu_tmp0 = get_temp_tl(dc);
5217 if (IS_IMM) { /* immediate */
5218 simm = GET_FIELDs(insn, 19, 31);
5219 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5220 } else { /* register */
5221 rs2 = GET_FIELD(insn, 27, 31);
5222 if (rs2) {
5223 cpu_src2 = gen_load_gpr(dc, rs2);
5224 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5225 } else {
5226 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5227 }
5228 }
5229 switch (xop) {
5230 case 0x38: /* jmpl */
5231 {
5232 TCGv t = gen_dest_gpr(dc, rd);
5233 tcg_gen_movi_tl(t, dc->pc);
5234 gen_store_gpr(dc, rd, t);
5236 gen_mov_pc_npc(dc);
5237 gen_check_align(cpu_tmp0, 3);
5238 gen_address_mask(dc, cpu_tmp0);
5239 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5240 dc->npc = DYNAMIC_PC;
5241 }
5242 goto jmp_insn;
5243 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5244 case 0x39: /* rett, V9 return */
5245 {
5246 if (!supervisor(dc))
5247 goto priv_insn;
5248 gen_mov_pc_npc(dc);
5249 gen_check_align(cpu_tmp0, 3);
5250 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5251 dc->npc = DYNAMIC_PC;
5252 gen_helper_rett(cpu_env);
5253 }
5254 goto jmp_insn;
5255 #endif
5256 case 0x3b: /* flush */
5257 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5258 goto unimp_flush;
5259 /* nop */
5260 break;
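/* flush needs no generated code here: QEMU keeps translated code
   coherent by write-protecting guest pages that contain translations,
   so only the feature check above matters. */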
5261 case 0x3c: /* save */
5262 gen_helper_save(cpu_env);
5263 gen_store_gpr(dc, rd, cpu_tmp0);
5264 break;
5265 case 0x3d: /* restore */
5266 gen_helper_restore(cpu_env);
5267 gen_store_gpr(dc, rd, cpu_tmp0);
5268 break;
5269 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5270 case 0x3e: /* V9 done/retry */
5271 {
5272 switch (rd) {
5273 case 0:
5274 if (!supervisor(dc))
5275 goto priv_insn;
5276 dc->npc = DYNAMIC_PC;
5277 dc->pc = DYNAMIC_PC;
5278 gen_helper_done(cpu_env);
5279 goto jmp_insn;
5280 case 1:
5281 if (!supervisor(dc))
5282 goto priv_insn;
5283 dc->npc = DYNAMIC_PC;
5284 dc->pc = DYNAMIC_PC;
5285 gen_helper_retry(cpu_env);
5286 goto jmp_insn;
5287 default:
5288 goto illegal_insn;
5289 }
5290 }
5291 break;
5292 #endif
5293 default:
5294 goto illegal_insn;
5295 }
5296 }
5297 break;
5298 }
5299 break;
5300 case 3: /* load/store instructions */
5301 {
5302 unsigned int xop = GET_FIELD(insn, 7, 12);
5303 /* ??? gen_address_mask prevents us from using a source
5304 register directly. Always generate a temporary. */
5305 TCGv cpu_addr = get_temp_tl(dc);
5307 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5308 if (xop == 0x3c || xop == 0x3e) {
5309 /* V9 casa/casxa : no offset */
5310 } else if (IS_IMM) { /* immediate */
5311 simm = GET_FIELDs(insn, 19, 31);
5312 if (simm != 0) {
5313 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5314 }
5315 } else { /* register */
5316 rs2 = GET_FIELD(insn, 27, 31);
5317 if (rs2 != 0) {
5318 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5319 }
5320 }
5321 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5322 (xop > 0x17 && xop <= 0x1d ) ||
5323 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5324 TCGv cpu_val = gen_dest_gpr(dc, rd);
5326 switch (xop) {
5327 case 0x0: /* ld, V9 lduw, load unsigned word */
5328 gen_address_mask(dc, cpu_addr);
5329 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
5330 break;
5331 case 0x1: /* ldub, load unsigned byte */
5332 gen_address_mask(dc, cpu_addr);
5333 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
5334 break;
5335 case 0x2: /* lduh, load unsigned halfword */
5336 gen_address_mask(dc, cpu_addr);
5337 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
5338 break;
5339 case 0x3: /* ldd, load double word */
5340 if (rd & 1)
5341 goto illegal_insn;
5342 else {
5343 TCGv_i64 t64;
5345 gen_address_mask(dc, cpu_addr);
5346 t64 = tcg_temp_new_i64();
5347 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
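/* Big-endian target: the word at [addr] sits in bits 63:32 of t64.
   The low word goes to the odd register rd + 1 here; the high word
   is left in cpu_val and reaches the even register rd through the
   common gen_store_gpr after this switch. */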
5348 tcg_gen_trunc_i64_tl(cpu_val, t64);
5349 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5350 gen_store_gpr(dc, rd + 1, cpu_val);
5351 tcg_gen_shri_i64(t64, t64, 32);
5352 tcg_gen_trunc_i64_tl(cpu_val, t64);
5353 tcg_temp_free_i64(t64);
5354 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5355 }
5356 break;
5357 case 0x9: /* ldsb, load signed byte */
5358 gen_address_mask(dc, cpu_addr);
5359 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
5360 break;
5361 case 0xa: /* ldsh, load signed halfword */
5362 gen_address_mask(dc, cpu_addr);
5363 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
5364 break;
5365 case 0xd: /* ldstub */
5366 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5367 break;
5368 case 0x0f:
5369 /* swap, swap register with memory. Also atomically */
5370 CHECK_IU_FEATURE(dc, SWAP);
5371 cpu_src1 = gen_load_gpr(dc, rd);
5372 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5373 dc->mem_idx, MO_TEUL);
5374 break;
5375 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5376 case 0x10: /* lda, V9 lduwa, load word alternate */
5377 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5378 break;
5379 case 0x11: /* lduba, load unsigned byte alternate */
5380 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5381 break;
5382 case 0x12: /* lduha, load unsigned halfword alternate */
5383 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5384 break;
5385 case 0x13: /* ldda, load double word alternate */
5386 if (rd & 1) {
5387 goto illegal_insn;
5388 }
5389 gen_ldda_asi(dc, cpu_addr, insn, rd);
5390 goto skip_move;
5391 case 0x19: /* ldsba, load signed byte alternate */
5392 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5393 break;
5394 case 0x1a: /* ldsha, load signed halfword alternate */
5395 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5396 break;
5397 case 0x1d: /* ldstuba -- XXX: should be atomically */
5398 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5399 break;
5400 case 0x1f: /* swapa, swap reg with alt. memory. Also
5401 atomically */
5402 CHECK_IU_FEATURE(dc, SWAP);
5403 cpu_src1 = gen_load_gpr(dc, rd);
5404 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5405 break;
5407 #ifndef TARGET_SPARC64
5408 case 0x30: /* ldc */
5409 case 0x31: /* ldcsr */
5410 case 0x33: /* lddc */
5411 goto ncp_insn;
5412 #endif
5413 #endif
5414 #ifdef TARGET_SPARC64
5415 case 0x08: /* V9 ldsw */
5416 gen_address_mask(dc, cpu_addr);
5417 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
5418 break;
5419 case 0x0b: /* V9 ldx */
5420 gen_address_mask(dc, cpu_addr);
5421 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
5422 break;
5423 case 0x18: /* V9 ldswa */
5424 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5425 break;
5426 case 0x1b: /* V9 ldxa */
5427 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5428 break;
5429 case 0x2d: /* V9 prefetch, no effect */
5430 goto skip_move;
5431 case 0x30: /* V9 ldfa */
5432 if (gen_trap_ifnofpu(dc)) {
5433 goto jmp_insn;
5434 }
5435 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5436 gen_update_fprs_dirty(dc, rd);
5437 goto skip_move;
5438 case 0x33: /* V9 lddfa */
5439 if (gen_trap_ifnofpu(dc)) {
5440 goto jmp_insn;
5441 }
5442 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5443 gen_update_fprs_dirty(dc, DFPREG(rd));
5444 goto skip_move;
5445 case 0x3d: /* V9 prefetcha, no effect */
5446 goto skip_move;
5447 case 0x32: /* V9 ldqfa */
5448 CHECK_FPU_FEATURE(dc, FLOAT128);
5449 if (gen_trap_ifnofpu(dc)) {
5450 goto jmp_insn;
5451 }
5452 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5453 gen_update_fprs_dirty(dc, QFPREG(rd));
5454 goto skip_move;
5455 #endif
5456 default:
5457 goto illegal_insn;
5458 }
5459 gen_store_gpr(dc, rd, cpu_val);
5460 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5461 skip_move: ;
5462 #endif
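/* skip_move bypasses the gen_store_gpr above for loads that write no
   single integer destination: ldda stores its register pair itself,
   and the prefetch and FP ASI loads write no GPR at all. */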
5463 } else if (xop >= 0x20 && xop < 0x24) {
5464 if (gen_trap_ifnofpu(dc)) {
5465 goto jmp_insn;
5466 }
5467 switch (xop) {
5468 case 0x20: /* ldf, load fpreg */
5469 gen_address_mask(dc, cpu_addr);
5470 cpu_dst_32 = gen_dest_fpr_F(dc);
5471 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5472 dc->mem_idx, MO_TEUL);
5473 gen_store_fpr_F(dc, rd, cpu_dst_32);
5474 break;
5475 case 0x21: /* ldfsr, V9 ldxfsr */
5476 #ifdef TARGET_SPARC64
5477 gen_address_mask(dc, cpu_addr);
5478 if (rd == 1) {
5479 TCGv_i64 t64 = tcg_temp_new_i64();
5480 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5481 dc->mem_idx, MO_TEQ);
5482 gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5483 tcg_temp_free_i64(t64);
5484 break;
5485 }
5486 #endif
5487 cpu_dst_32 = get_temp_i32(dc);
5488 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5489 dc->mem_idx, MO_TEUL);
5490 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5491 break;
5492 case 0x22: /* ldqf, load quad fpreg */
5493 CHECK_FPU_FEATURE(dc, FLOAT128);
5494 gen_address_mask(dc, cpu_addr);
5495 cpu_src1_64 = tcg_temp_new_i64();
5496 tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5497 MO_TEQ | MO_ALIGN_4);
5498 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5499 cpu_src2_64 = tcg_temp_new_i64();
5500 tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5501 MO_TEQ | MO_ALIGN_4);
5502 gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5503 tcg_temp_free_i64(cpu_src1_64);
5504 tcg_temp_free_i64(cpu_src2_64);
5505 break;
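/* ldqf above is split into two 64-bit loads; MO_ALIGN_4 enforces only
   word alignment, the strictest the ISA requires for quad FP accesses
   (compare the stqf note further down). */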
5506 case 0x23: /* lddf, load double fpreg */
5507 gen_address_mask(dc, cpu_addr);
5508 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5509 tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5510 MO_TEQ | MO_ALIGN_4);
5511 gen_store_fpr_D(dc, rd, cpu_dst_64);
5512 break;
5513 default:
5514 goto illegal_insn;
5515 }
5516 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5517 xop == 0xe || xop == 0x1e) {
5518 TCGv cpu_val = gen_load_gpr(dc, rd);
5520 switch (xop) {
5521 case 0x4: /* st, store word */
5522 gen_address_mask(dc, cpu_addr);
5523 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
5524 break;
5525 case 0x5: /* stb, store byte */
5526 gen_address_mask(dc, cpu_addr);
5527 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
5528 break;
5529 case 0x6: /* sth, store halfword */
5530 gen_address_mask(dc, cpu_addr);
5531 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
5532 break;
5533 case 0x7: /* std, store double word */
5534 if (rd & 1)
5535 goto illegal_insn;
5536 else {
5537 TCGv_i64 t64;
5538 TCGv lo;
5540 gen_address_mask(dc, cpu_addr);
5541 lo = gen_load_gpr(dc, rd + 1);
5542 t64 = tcg_temp_new_i64();
5543 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5544 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
5545 tcg_temp_free_i64(t64);
5546 }
5547 break;
5548 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5549 case 0x14: /* sta, V9 stwa, store word alternate */
5550 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5551 break;
5552 case 0x15: /* stba, store byte alternate */
5553 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5554 break;
5555 case 0x16: /* stha, store halfword alternate */
5556 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5557 break;
5558 case 0x17: /* stda, store double word alternate */
5559 if (rd & 1) {
5560 goto illegal_insn;
5561 }
5562 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5563 break;
5564 #endif
5565 #ifdef TARGET_SPARC64
5566 case 0x0e: /* V9 stx */
5567 gen_address_mask(dc, cpu_addr);
5568 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5569 break;
5570 case 0x1e: /* V9 stxa */
5571 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5572 break;
5573 #endif
5574 default:
5575 goto illegal_insn;
5576 }
5577 } else if (xop > 0x23 && xop < 0x28) {
5578 if (gen_trap_ifnofpu(dc)) {
5579 goto jmp_insn;
5580 }
5581 switch (xop) {
5582 case 0x24: /* stf, store fpreg */
5583 gen_address_mask(dc, cpu_addr);
5584 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5585 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5586 dc->mem_idx, MO_TEUL);
5587 break;
5588 case 0x25: /* stfsr, V9 stxfsr */
5589 {
5590 #ifdef TARGET_SPARC64
5591 gen_address_mask(dc, cpu_addr);
5592 if (rd == 1) {
5593 tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
5594 break;
5595 }
5596 #endif
5597 tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
5598 }
5599 break;
5600 case 0x26:
5601 #ifdef TARGET_SPARC64
5602 /* V9 stqf, store quad fpreg */
5603 CHECK_FPU_FEATURE(dc, FLOAT128);
5604 gen_address_mask(dc, cpu_addr);
5605 /* ??? While stqf only requires 4-byte alignment, it is
5606 legal for the cpu to signal the unaligned exception.
5607 The OS trap handler is then required to fix it up.
5608 For qemu, this avoids having to probe the second page
5609 before performing the first write. */
5610 cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5611 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5612 dc->mem_idx, MO_TEQ | MO_ALIGN_16);
5613 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5614 cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5615 tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
5616 dc->mem_idx, MO_TEQ);
5617 break;
5618 #else /* !TARGET_SPARC64 */
5619 /* stdfq, store floating point queue */
5620 #if defined(CONFIG_USER_ONLY)
5621 goto illegal_insn;
5622 #else
5623 if (!supervisor(dc))
5624 goto priv_insn;
5625 if (gen_trap_ifnofpu(dc)) {
5626 goto jmp_insn;
5627 }
5628 goto nfq_insn;
5629 #endif
5630 #endif
5631 case 0x27: /* stdf, store double fpreg */
5632 gen_address_mask(dc, cpu_addr);
5633 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5634 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5635 MO_TEQ | MO_ALIGN_4);
5636 break;
5637 default:
5638 goto illegal_insn;
5639 }
5640 } else if (xop > 0x33 && xop < 0x3f) {
5641 switch (xop) {
5642 #ifdef TARGET_SPARC64
5643 case 0x34: /* V9 stfa */
5644 if (gen_trap_ifnofpu(dc)) {
5645 goto jmp_insn;
5646 }
5647 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5648 break;
5649 case 0x36: /* V9 stqfa */
5650 {
5651 CHECK_FPU_FEATURE(dc, FLOAT128);
5652 if (gen_trap_ifnofpu(dc)) {
5653 goto jmp_insn;
5654 }
5655 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5656 }
5657 break;
5658 case 0x37: /* V9 stdfa */
5659 if (gen_trap_ifnofpu(dc)) {
5660 goto jmp_insn;
5661 }
5662 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5663 break;
5664 case 0x3e: /* V9 casxa */
5665 rs2 = GET_FIELD(insn, 27, 31);
5666 cpu_src2 = gen_load_gpr(dc, rs2);
5667 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5668 break;
5669 #else
5670 case 0x34: /* stc */
5671 case 0x35: /* stcsr */
5672 case 0x36: /* stdcq */
5673 case 0x37: /* stdc */
5674 goto ncp_insn;
5675 #endif
5676 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5677 case 0x3c: /* V9 or LEON3 casa */
5678 #ifndef TARGET_SPARC64
5679 CHECK_IU_FEATURE(dc, CASA);
5680 #endif
5681 rs2 = GET_FIELD(insn, 27, 31);
5682 cpu_src2 = gen_load_gpr(dc, rs2);
5683 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5684 break;
5685 #endif
5686 default:
5687 goto illegal_insn;
5688 }
5689 } else {
5690 goto illegal_insn;
5691 }
5692 }
5693 break;
5694 }
5695 /* default case for non jump instructions */
5696 if (dc->npc == DYNAMIC_PC) {
5697 dc->pc = DYNAMIC_PC;
5698 gen_op_next_insn();
5699 } else if (dc->npc == JUMP_PC) {
5700 /* we can do a static jump */
5701 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5702 dc->is_br = 1;
5703 } else {
5704 dc->pc = dc->npc;
5705 dc->npc = dc->npc + 4;
5706 }
5707 jmp_insn:
5708 goto egress;
5709 illegal_insn:
5710 gen_exception(dc, TT_ILL_INSN);
5711 goto egress;
5712 unimp_flush:
5713 gen_exception(dc, TT_UNIMP_FLUSH);
5714 goto egress;
5715 #if !defined(CONFIG_USER_ONLY)
5716 priv_insn:
5717 gen_exception(dc, TT_PRIV_INSN);
5718 goto egress;
5719 #endif
5720 nfpu_insn:
5721 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5722 goto egress;
5723 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5724 nfq_insn:
5725 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5726 goto egress;
5727 #endif
5728 #ifndef TARGET_SPARC64
5729 ncp_insn:
5730 gen_exception(dc, TT_NCP_INSN);
5731 goto egress;
5732 #endif
5733 egress:
5734 if (dc->n_t32 != 0) {
5735 int i;
5736 for (i = dc->n_t32 - 1; i >= 0; --i) {
5737 tcg_temp_free_i32(dc->t32[i]);
5738 }
5739 dc->n_t32 = 0;
5740 }
5741 if (dc->n_ttl != 0) {
5742 int i;
5743 for (i = dc->n_ttl - 1; i >= 0; --i) {
5744 tcg_temp_free(dc->ttl[i]);
5745 }
5746 dc->n_ttl = 0;
5747 }
5748 }
5750 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5751 {
5752 SPARCCPU *cpu = sparc_env_get_cpu(env);
5753 CPUState *cs = CPU(cpu);
5754 target_ulong pc_start, last_pc;
5755 DisasContext dc1, *dc = &dc1;
5756 int num_insns;
5757 int max_insns;
5758 unsigned int insn;
5760 memset(dc, 0, sizeof(DisasContext));
5761 dc->tb = tb;
5762 pc_start = tb->pc;
5763 dc->pc = pc_start;
5764 last_pc = dc->pc;
5765 dc->npc = (target_ulong) tb->cs_base;
5766 dc->cc_op = CC_OP_DYNAMIC;
5767 dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
5768 dc->def = env->def;
5769 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5770 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5771 dc->singlestep = (cs->singlestep_enabled || singlestep);
5772 #ifndef CONFIG_USER_ONLY
5773 dc->supervisor = (tb->flags & TB_FLAG_SUPER) != 0;
5774 #endif
5775 #ifdef TARGET_SPARC64
5776 dc->fprs_dirty = 0;
5777 dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5778 #ifndef CONFIG_USER_ONLY
5779 dc->hypervisor = (tb->flags & TB_FLAG_HYPER) != 0;
5780 #endif
5781 #endif
5783 num_insns = 0;
5784 max_insns = tb->cflags & CF_COUNT_MASK;
5785 if (max_insns == 0) {
5786 max_insns = CF_COUNT_MASK;
5787 }
5788 if (max_insns > TCG_MAX_INSNS) {
5789 max_insns = TCG_MAX_INSNS;
5790 }
5792 gen_tb_start(tb);
5793 do {
5794 if (dc->npc & JUMP_PC) {
5795 assert(dc->jump_pc[1] == dc->pc + 4);
5796 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
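/* The JUMP_PC bit tags jump_pc[0] so restore_state_to_opc() below can
   recognize this case and select between the two targets; jump_pc[1]
   is always pc + 4, as the assert above checks. */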
5797 } else {
5798 tcg_gen_insn_start(dc->pc, dc->npc);
5799 }
5800 num_insns++;
5801 last_pc = dc->pc;
5803 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5804 if (dc->pc != pc_start) {
5805 save_state(dc);
5806 }
5807 gen_helper_debug(cpu_env);
5808 tcg_gen_exit_tb(0);
5809 dc->is_br = 1;
5810 goto exit_gen_loop;
5811 }
5813 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5814 gen_io_start();
5815 }
5817 insn = cpu_ldl_code(env, dc->pc);
5819 disas_sparc_insn(dc, insn);
5821 if (dc->is_br)
5822 break;
5823 /* if the next PC is different, we abort now */
5824 if (dc->pc != (last_pc + 4))
5825 break;
5826 /* if we reach a page boundary, we stop generation so that the
5827 PC of a TT_TFAULT exception is always in the right page */
5828 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5829 break;
5830 /* if single step mode, we generate only one instruction and
5831 generate an exception */
5832 if (dc->singlestep) {
5833 break;
5834 }
5835 } while (!tcg_op_buf_full() &&
5836 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5837 num_insns < max_insns);
5839 exit_gen_loop:
5840 if (tb->cflags & CF_LAST_IO) {
5841 gen_io_end();
5842 }
5843 if (!dc->is_br) {
5844 if (dc->pc != DYNAMIC_PC &&
5845 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5846 /* static PC and NPC: we can use direct chaining */
5847 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5848 } else {
5849 if (dc->pc != DYNAMIC_PC) {
5850 tcg_gen_movi_tl(cpu_pc, dc->pc);
5851 }
5852 save_npc(dc);
5853 tcg_gen_exit_tb(0);
5854 }
5855 }
5856 gen_tb_end(tb, num_insns);
5858 tb->size = last_pc + 4 - pc_start;
5859 tb->icount = num_insns;
5861 #ifdef DEBUG_DISAS
5862 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5863 && qemu_log_in_addr_range(pc_start)) {
5864 qemu_log_lock();
5865 qemu_log("--------------\n");
5866 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5867 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5868 qemu_log("\n");
5869 qemu_log_unlock();
5870 }
5871 #endif
5872 }
5874 void gen_intermediate_code_init(CPUSPARCState *env)
5875 {
5876 static int inited;
5877 static const char gregnames[32][4] = {
5878 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5879 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5880 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5881 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5882 };
5883 static const char fregnames[32][4] = {
5884 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5885 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5886 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5887 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5888 };
5890 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5891 #ifdef TARGET_SPARC64
5892 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5893 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5894 #else
5895 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5896 #endif
5897 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5898 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5899 };
5901 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5902 #ifdef TARGET_SPARC64
5903 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5904 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5905 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5906 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5907 "hstick_cmpr" },
5908 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5909 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5910 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5911 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5912 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5913 #endif
5914 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5915 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5916 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5917 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5918 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5919 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5920 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5921 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5922 #ifndef CONFIG_USER_ONLY
5923 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5924 #endif
5925 };
5927 unsigned int i;
5929 /* init various static tables */
5930 if (inited) {
5931 return;
5932 }
5933 inited = 1;
5935 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5936 tcg_ctx.tcg_env = cpu_env;
5938 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5939 offsetof(CPUSPARCState, regwptr),
5940 "regwptr");
5942 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5943 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5944 }
5946 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5947 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5948 }
5950 TCGV_UNUSED(cpu_regs[0]);
5951 for (i = 1; i < 8; ++i) {
5952 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5953 offsetof(CPUSPARCState, gregs[i]),
5954 gregnames[i]);
5955 }
5957 for (i = 8; i < 32; ++i) {
5958 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5959 (i - 8) * sizeof(target_ulong),
5960 gregnames[i]);
5961 }
5963 for (i = 0; i < TARGET_DPREGS; i++) {
5964 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5965 offsetof(CPUSPARCState, fpr[i]),
5966 fregnames[i]);
5967 }
5968 }
5970 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5971 target_ulong *data)
5972 {
5973 target_ulong pc = data[0];
5974 target_ulong npc = data[1];
5976 env->pc = pc;
5977 if (npc == DYNAMIC_PC) {
5978 /* dynamic NPC: already stored */
5979 } else if (npc & JUMP_PC) {
5980 /* jump PC: use 'cond' and the jump targets of the translation */
5981 if (env->cond) {
5982 env->npc = npc & ~3;
5983 } else {
5984 env->npc = pc + 4;
5985 }
5986 } else {
5987 env->npc = npc;
5988 }
5989 }