/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#include "asi.h"

#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1<<13))
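
/* Scratch temporaries are recorded in the DisasContext (the t32/ttl
   arrays above) so that the translator can release them all at once
   when it finishes an instruction, rather than tracking each
   temporary's lifetime by hand at every call site. */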
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}

#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif
/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#else
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}
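
/* %g0 is hardwired to zero on SPARC: a load of register 0 materializes
   a fresh zero temporary, and a store to it is simply dropped, so
   cpu_regs[] is never accessed at index 0. */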
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}

static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
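
/* Direct TB chaining via goto_tb is only safe when both pc and npc stay
   on the same guest page as the current TB (and we are not
   single-stepping); otherwise control returns to the main loop through
   exit_tb(0) so the new pc can be re-looked-up. */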
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
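
/* ADDX/SUBX need the carry left over from the previous cc-setting
   instruction.  Depending on dc->cc_op it can be had for free (known
   zero after logic ops), recomputed from cc_src/cc_dst as above, or,
   in the worst case, fetched via the compute_C_icc helper. */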
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
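
/* MULScc implements one step of the V8 iterative 32-bit multiply: the
   multiplier being consumed sits in %y, each step conditionally adds
   the multiplicand (zeroed when %y's low bit is clear) and shifts
   N ^ V into the top of the shifted partial product. */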
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
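
/* UMUL/SMUL produce a 64-bit product: the low 32 bits go to the
   destination and the high 32 bits are written back into %y.  On a
   64-bit target this is a full 64x64 multiply of the (zero- or
   sign-)extended low words. */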
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
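
/* Evaluators for the sixteen integer branch conditions.  Each reduces
   the icc bits of the supplied PSR value to a 0/1 result in dst; the
   negated forms reuse their positive counterpart and flip the result
   with xori. */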
// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
   FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
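
/* Branch emission helpers.  gen_branch2 emits the taken/not-taken pair
   of goto_tb exits for a conditional delayed branch; gen_branch_a
   handles the annulled form, where the delay-slot instruction is
   skipped when the branch is not taken. */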
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->is_br = 1;
}

static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);
    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
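
/* Build a DisasCompare for one of the sixteen icc/xcc conditions.
   When the flags came from SUBcc or a logic instruction, the compare
   can be reconstructed directly from cc_src/cc_dst without
   materializing the PSR; only the overflow-based conditions and the
   unknown-cc_op case fall back to compute_psr and a boolean result. */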
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
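
/* Decode and emit a Bicc/FBfcc-style branch.  cond 0 (never) and
   8 (always) never look at the condition codes, so they are resolved
   at translation time, with the annul bit deciding whether the
   delay-slot instruction executes; all other conditions go through
   gen_cond/gen_fcond and the gen_branch_a/gen_branch_n machinery. */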
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
#endif
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
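
/* Template generators for FPop translation.  Each gen_fop_* variant
   loads the source registers, calls the given TCG helper, checks for
   pending IEEE exceptions, and stores the result; the gen_ne_fop_*
   variants are for operations that cannot raise exceptions and skip
   the check. */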
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, TCGMemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
}

static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_const_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
    tcg_temp_free(m1);
}
/* asi moves */
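/* get_asi() classifies an ASI into one of the ASIType cases below:
   accesses that can be translated inline as ordinary loads/stores
   (possibly with a different mmu_idx or a byte-swapped memop) become
   GET_ASI_DIRECT and friends; anything unrecognized falls back to the
   ld_asi/st_asi helpers, and privilege violations raise an exception
   at translation time. */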
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    TCGMemOp memop;
} DisasASI;
static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:   /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA: /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }

    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            mem_idx = MMU_NUCLEUS_IDX;
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }
}
static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for stda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
        break;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    case GET_ASI_BCOPY:
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_const_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }

            tcg_temp_free(saddr);
            tcg_temp_free(daddr);
            tcg_temp_free(four);
            tcg_temp_free_i32(tmp);
        }
        break;
#endif
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);

            /* A write to a TLB register may alter page maps.  End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2376 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2377 TCGv addr, int insn)
2379 DisasASI da = get_asi(dc, insn, MO_TEUL);
2381 switch (da.type) {
2382 case GET_ASI_EXCP:
2383 break;
2384 case GET_ASI_DIRECT:
2385 gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
2386 break;
2387 default:
2388 /* ??? Should be DAE_invalid_asi. */
2389 gen_exception(dc, TT_DATA_ACCESS);
2390 break;
2394 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2395 int insn, int rd)
2397 DisasASI da = get_asi(dc, insn, MO_TEUL);
2398 TCGv oldv;
2400 switch (da.type) {
2401 case GET_ASI_EXCP:
2402 return;
2403 case GET_ASI_DIRECT:
2404 oldv = tcg_temp_new();
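/* CASA semantics: compare the word at ADDR with CMPV (rs2); if equal,
   store the value from rd. In either case rd then receives the old
   memory word, which tcg_gen_atomic_cmpxchg_tl provides directly
   (descriptive note). */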
2405 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2406 da.mem_idx, da.memop);
2407 gen_store_gpr(dc, rd, oldv);
2408 tcg_temp_free(oldv);
2409 break;
2410 default:
2411 /* ??? Should be DAE_invalid_asi. */
2412 gen_exception(dc, TT_DATA_ACCESS);
2413 break;
2417 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2419 DisasASI da = get_asi(dc, insn, MO_UB);
2421 switch (da.type) {
2422 case GET_ASI_EXCP:
2423 break;
2424 case GET_ASI_DIRECT:
2425 gen_ldstub(dc, dst, addr, da.mem_idx);
2426 break;
2427 default:
2428 /* ??? Should be DAE_invalid_asi. */
2429 gen_exception(dc, TT_DATA_ACCESS);
2430 break;
2433 #endif
2435 #ifdef TARGET_SPARC64
2436 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2437 int insn, int size, int rd)
2439 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2440 TCGv_i32 d32;
2441 TCGv_i64 d64;
2443 switch (da.type) {
2444 case GET_ASI_EXCP:
2445 break;
2447 case GET_ASI_DIRECT:
2448 gen_address_mask(dc, addr);
2449 switch (size) {
2450 case 4:
2451 d32 = gen_dest_fpr_F(dc);
2452 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
2453 gen_store_fpr_F(dc, rd, d32);
2454 break;
2455 case 8:
2456 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2457 da.memop | MO_ALIGN_4);
2458 break;
2459 case 16:
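/* Load the first half into a temporary so that a fault on the second
   access leaves the destination register pair unmodified; the even
   register is written back only after both loads succeed
   (descriptive note). */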
2460 d64 = tcg_temp_new_i64();
2461 tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
2462 tcg_gen_addi_tl(addr, addr, 8);
2463 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
2464 da.memop | MO_ALIGN_4);
2465 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2466 tcg_temp_free_i64(d64);
2467 break;
2468 default:
2469 g_assert_not_reached();
2471 break;
2473 case GET_ASI_BLOCK:
2474 /* Valid for lddfa on aligned registers only. */
2475 if (size == 8 && (rd & 7) == 0) {
2476 TCGMemOp memop;
2477 TCGv eight;
2478 int i;
2480 gen_address_mask(dc, addr);
2482 /* The first operation checks required alignment. */
2483 memop = da.memop | MO_ALIGN_64;
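/* A block transfer moves 64 bytes, i.e. eight 8-byte FP registers.
   Only this first access carries the 64-byte alignment check; the
   remaining seven accesses are sequential (descriptive note). */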
2484 eight = tcg_const_tl(8);
2485 for (i = 0; ; ++i) {
2486 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2487 da.mem_idx, memop);
2488 if (i == 7) {
2489 break;
2491 tcg_gen_add_tl(addr, addr, eight);
2492 memop = da.memop;
2494 tcg_temp_free(eight);
2495 } else {
2496 gen_exception(dc, TT_ILL_INSN);
2498 break;
2500 case GET_ASI_SHORT:
2501 /* Valid for lddfa only. */
2502 if (size == 8) {
2503 gen_address_mask(dc, addr);
2504 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2505 } else {
2506 gen_exception(dc, TT_ILL_INSN);
2508 break;
2510 default:
2512 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2513 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2515 save_state(dc);
2516 /* According to the table in the UA2011 manual, the only
2517 other asis that are valid for ldfa/lddfa/ldqfa are
2518 the NO_FAULT asis. We still need a helper for these,
2519 but we can just use the integer asi helper for them. */
2520 switch (size) {
2521 case 4:
2522 d64 = tcg_temp_new_i64();
2523 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2524 d32 = gen_dest_fpr_F(dc);
2525 tcg_gen_extrl_i64_i32(d32, d64);
2526 tcg_temp_free_i64(d64);
2527 gen_store_fpr_F(dc, rd, d32);
2528 break;
2529 case 8:
2530 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2531 break;
2532 case 16:
2533 d64 = tcg_temp_new_i64();
2534 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2535 tcg_gen_addi_tl(addr, addr, 8);
2536 gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2537 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2538 tcg_temp_free_i64(d64);
2539 break;
2540 default:
2541 g_assert_not_reached();
2543 tcg_temp_free_i32(r_mop);
2544 tcg_temp_free_i32(r_asi);
2546 break;
2550 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2551 int insn, int size, int rd)
2553 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2554 TCGv_i32 d32;
2556 switch (da.type) {
2557 case GET_ASI_EXCP:
2558 break;
2560 case GET_ASI_DIRECT:
2561 gen_address_mask(dc, addr);
2562 switch (size) {
2563 case 4:
2564 d32 = gen_load_fpr_F(dc, rd);
2565 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
2566 break;
2567 case 8:
2568 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2569 da.memop | MO_ALIGN_4);
2570 break;
2571 case 16:
2572 /* Only 4-byte alignment required. However, it is legal for the
2573 cpu to signal the alignment fault, and the OS trap handler is
2574 required to fix it up. Requiring 16-byte alignment here avoids
2575 having to probe the second page before performing the first
2576 write. */
2577 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2578 da.memop | MO_ALIGN_16);
2579 tcg_gen_addi_tl(addr, addr, 8);
2580 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2581 break;
2582 default:
2583 g_assert_not_reached();
2585 break;
2587 case GET_ASI_BLOCK:
2588 /* Valid for stdfa on aligned registers only. */
2589 if (size == 8 && (rd & 7) == 0) {
2590 TCGMemOp memop;
2591 TCGv eight;
2592 int i;
2594 gen_address_mask(dc, addr);
2596 /* The first operation checks required alignment. */
2597 memop = da.memop | MO_ALIGN_64;
2598 eight = tcg_const_tl(8);
2599 for (i = 0; ; ++i) {
2600 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2601 da.mem_idx, memop);
2602 if (i == 7) {
2603 break;
2605 tcg_gen_add_tl(addr, addr, eight);
2606 memop = da.memop;
2608 tcg_temp_free(eight);
2609 } else {
2610 gen_exception(dc, TT_ILL_INSN);
2612 break;
2614 case GET_ASI_SHORT:
2615 /* Valid for stdfa only. */
2616 if (size == 8) {
2617 gen_address_mask(dc, addr);
2618 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2619 } else {
2620 gen_exception(dc, TT_ILL_INSN);
2622 break;
2624 default:
2625 /* According to the table in the UA2011 manual, the only
2626 other asis that are valid for stfa/stdfa/stqfa are
2627 the PST* asis, which aren't currently handled. */
2628 gen_exception(dc, TT_ILL_INSN);
2629 break;
2633 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2635 DisasASI da = get_asi(dc, insn, MO_TEQ);
2636 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2637 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2639 switch (da.type) {
2640 case GET_ASI_EXCP:
2641 return;
2643 case GET_ASI_DTWINX:
2644 gen_address_mask(dc, addr);
2645 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2646 tcg_gen_addi_tl(addr, addr, 8);
2647 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2648 break;
2650 case GET_ASI_DIRECT:
2652 TCGv_i64 tmp = tcg_temp_new_i64();
2654 gen_address_mask(dc, addr);
2655 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
2657 /* Note that LE ldda acts as if each 32-bit register
2658 result is byte swapped. Having just performed one
2659 64-bit bswap, we now need to swap the writebacks. */
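/* Illustration: a little-endian 64-bit load leaves the four bytes at
   ADDR in the low half of TMP (byte-reversed relative to the
   architected big-endian order), so routing the low half to the even
   register reproduces the per-word LE result. */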
2660 if ((da.memop & MO_BSWAP) == MO_TE) {
2661 tcg_gen_extr32_i64(lo, hi, tmp);
2662 } else {
2663 tcg_gen_extr32_i64(hi, lo, tmp);
2665 tcg_temp_free_i64(tmp);
2667 break;
2669 default:
2670 /* ??? In theory we've handled all of the ASIs that are valid
2671 for ldda, and this should raise DAE_invalid_asi. However,
2672 real hardware allows others. This can be seen with e.g.
2673 FreeBSD 10.3 wrt ASI_IC_TAG. */
2675 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2676 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2677 TCGv_i64 tmp = tcg_temp_new_i64();
2679 save_state(dc);
2680 gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
2681 tcg_temp_free_i32(r_asi);
2682 tcg_temp_free_i32(r_mop);
2684 /* See above. */
2685 if ((da.memop & MO_BSWAP) == MO_TE) {
2686 tcg_gen_extr32_i64(lo, hi, tmp);
2687 } else {
2688 tcg_gen_extr32_i64(hi, lo, tmp);
2690 tcg_temp_free_i64(tmp);
2692 break;
2695 gen_store_gpr(dc, rd, hi);
2696 gen_store_gpr(dc, rd + 1, lo);
2699 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2700 int insn, int rd)
2702 DisasASI da = get_asi(dc, insn, MO_TEQ);
2703 TCGv lo = gen_load_gpr(dc, rd + 1);
2705 switch (da.type) {
2706 case GET_ASI_EXCP:
2707 break;
2709 case GET_ASI_DTWINX:
2710 gen_address_mask(dc, addr);
2711 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2712 tcg_gen_addi_tl(addr, addr, 8);
2713 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2714 break;
2716 case GET_ASI_DIRECT:
2718 TCGv_i64 t64 = tcg_temp_new_i64();
2720 /* Note that LE stda acts as if each 32-bit register result is
2721 byte swapped. We will perform one 64-bit LE store, so now
2722 we must swap the order of the construction. */
2723 if ((da.memop & MO_BSWAP) == MO_TE) {
2724 tcg_gen_concat32_i64(t64, lo, hi);
2725 } else {
2726 tcg_gen_concat32_i64(t64, hi, lo);
2728 gen_address_mask(dc, addr);
2729 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2730 tcg_temp_free_i64(t64);
2732 break;
2734 default:
2735 /* ??? In theory we've handled all of the ASIs that are valid
2736 for stda, and this should raise DAE_invalid_asi. */
2738 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2739 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2740 TCGv_i64 t64 = tcg_temp_new_i64();
2742 /* See above. */
2743 if ((da.memop & MO_BSWAP) == MO_TE) {
2744 tcg_gen_concat32_i64(t64, lo, hi);
2745 } else {
2746 tcg_gen_concat32_i64(t64, hi, lo);
2749 save_state(dc);
2750 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2751 tcg_temp_free_i32(r_mop);
2752 tcg_temp_free_i32(r_asi);
2753 tcg_temp_free_i64(t64);
2755 break;
2759 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2760 int insn, int rd)
2762 DisasASI da = get_asi(dc, insn, MO_TEQ);
2763 TCGv oldv;
2765 switch (da.type) {
2766 case GET_ASI_EXCP:
2767 return;
2768 case GET_ASI_DIRECT:
2769 oldv = tcg_temp_new();
2770 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2771 da.mem_idx, da.memop);
2772 gen_store_gpr(dc, rd, oldv);
2773 tcg_temp_free(oldv);
2774 break;
2775 default:
2776 /* ??? Should be DAE_invalid_asi. */
2777 gen_exception(dc, TT_DATA_ACCESS);
2778 break;
2782 #elif !defined(CONFIG_USER_ONLY)
2783 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2785 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2786 whereby "rd + 1" elicits "error: array subscript is above array bounds".
2787 Since we have already asserted that rd is even, the semantics
2788 are unchanged. */
2789 TCGv lo = gen_dest_gpr(dc, rd | 1);
2790 TCGv hi = gen_dest_gpr(dc, rd);
2791 TCGv_i64 t64 = tcg_temp_new_i64();
2792 DisasASI da = get_asi(dc, insn, MO_TEQ);
2794 switch (da.type) {
2795 case GET_ASI_EXCP:
2796 tcg_temp_free_i64(t64);
2797 return;
2798 case GET_ASI_DIRECT:
2799 gen_address_mask(dc, addr);
2800 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
2801 break;
2802 default:
2804 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2805 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2807 save_state(dc);
2808 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2809 tcg_temp_free_i32(r_mop);
2810 tcg_temp_free_i32(r_asi);
2812 break;
2815 tcg_gen_extr_i64_i32(lo, hi, t64);
2816 tcg_temp_free_i64(t64);
2817 gen_store_gpr(dc, rd | 1, lo);
2818 gen_store_gpr(dc, rd, hi);
2821 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2822 int insn, int rd)
2824 DisasASI da = get_asi(dc, insn, MO_TEQ);
2825 TCGv lo = gen_load_gpr(dc, rd + 1);
2826 TCGv_i64 t64 = tcg_temp_new_i64();
2828 tcg_gen_concat_tl_i64(t64, lo, hi);
2830 switch (da.type) {
2831 case GET_ASI_EXCP:
2832 break;
2833 case GET_ASI_DIRECT:
2834 gen_address_mask(dc, addr);
2835 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2836 break;
2837 case GET_ASI_BFILL:
2838 /* Store 32 bytes of T64 to ADDR. */
2839 /* ??? The original qemu code suggests 8-byte alignment, dropping
2840 the low bits, but the only place I can see this used is in the
2841 Linux kernel with 32-byte alignment, which would make more sense
2842 as a cacheline-style operation. */
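/* A sketch of the intended semantics in plain C (illustrative only):
       for (i = 0; i < 32; i += 8)
           *(uint64_t *)((addr & ~7) + i) = t64;
   i.e. the 64-bit value is replicated to fill a 32-byte region. */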
2844 TCGv d_addr = tcg_temp_new();
2845 TCGv eight = tcg_const_tl(8);
2846 int i;
2848 tcg_gen_andi_tl(d_addr, addr, -8);
2849 for (i = 0; i < 32; i += 8) {
2850 tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
2851 tcg_gen_add_tl(d_addr, d_addr, eight);
2854 tcg_temp_free(d_addr);
2855 tcg_temp_free(eight);
2857 break;
2858 default:
2860 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2861 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2863 save_state(dc);
2864 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2865 tcg_temp_free_i32(r_mop);
2866 tcg_temp_free_i32(r_asi);
2868 break;
2871 tcg_temp_free_i64(t64);
2873 #endif
2875 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2877 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2878 return gen_load_gpr(dc, rs1);
2881 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2883 if (IS_IMM) { /* immediate */
2884 target_long simm = GET_FIELDs(insn, 19, 31);
2885 TCGv t = get_temp_tl(dc);
2886 tcg_gen_movi_tl(t, simm);
2887 return t;
2888 } else { /* register */
2889 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2890 return gen_load_gpr(dc, rs2);
2894 #ifdef TARGET_SPARC64
2895 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2897 TCGv_i32 c32, zero, dst, s1, s2;
2899 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2900 or fold the comparison down to 32 bits and use movcond_i32. Choose
2901 the latter. */
2902 c32 = tcg_temp_new_i32();
2903 if (cmp->is_bool) {
2904 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2905 } else {
2906 TCGv_i64 c64 = tcg_temp_new_i64();
2907 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2908 tcg_gen_extrl_i64_i32(c32, c64);
2909 tcg_temp_free_i64(c64);
2912 s1 = gen_load_fpr_F(dc, rs);
2913 s2 = gen_load_fpr_F(dc, rd);
2914 dst = gen_dest_fpr_F(dc);
2915 zero = tcg_const_i32(0);
2917 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2919 tcg_temp_free_i32(c32);
2920 tcg_temp_free_i32(zero);
2921 gen_store_fpr_F(dc, rd, dst);
2924 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2926 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2927 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2928 gen_load_fpr_D(dc, rs),
2929 gen_load_fpr_D(dc, rd));
2930 gen_store_fpr_D(dc, rd, dst);
2933 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2935 int qd = QFPREG(rd);
2936 int qs = QFPREG(rs);
2938 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2939 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2940 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2941 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2943 gen_update_fprs_dirty(dc, qd);
2946 #ifndef CONFIG_USER_ONLY
2947 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2949 TCGv_i32 r_tl = tcg_temp_new_i32();
2951 /* load env->tl into r_tl */
2952 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2954 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2955 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2957 /* calculate offset to current trap state from env->ts, reuse r_tl */
2958 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2959 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2961 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2963 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2964 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2965 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2966 tcg_temp_free_ptr(r_tl_tmp);
2969 tcg_temp_free_i32(r_tl);
2971 #endif
2973 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2974 int width, bool cc, bool left)
2976 TCGv lo1, lo2, t1, t2;
2977 uint64_t amask, tabl, tabr;
2978 int shift, imask, omask;
2980 if (cc) {
2981 tcg_gen_mov_tl(cpu_cc_src, s1);
2982 tcg_gen_mov_tl(cpu_cc_src2, s2);
2983 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2984 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2985 dc->cc_op = CC_OP_SUB;
2988 /* Theory of operation: there are two tables, left and right (not to
2989 be confused with the left and right versions of the opcode). These
2990 are indexed by the low 3 bits of the inputs. To make things "easy",
2991 these tables are loaded into two constants, TABL and TABR below.
2992 The operation index = (input & imask) << shift calculates the index
2993 into the constant, while val = (table >> index) & omask calculates
2994 the value we're looking for. */
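/* Worked example (width 8, left): for s1 & 7 == 5 the index is
   5 << 3 == 40, and (0x80c0e0f0f8fcfeffULL >> 40) & 0xff == 0xe0,
   the left-edge byte mask for a 5-byte misalignment; the other
   table entries follow the same pattern (illustrative). */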
2995 switch (width) {
2996 case 8:
2997 imask = 0x7;
2998 shift = 3;
2999 omask = 0xff;
3000 if (left) {
3001 tabl = 0x80c0e0f0f8fcfeffULL;
3002 tabr = 0xff7f3f1f0f070301ULL;
3003 } else {
3004 tabl = 0x0103070f1f3f7fffULL;
3005 tabr = 0xfffefcf8f0e0c080ULL;
3007 break;
3008 case 16:
3009 imask = 0x6;
3010 shift = 1;
3011 omask = 0xf;
3012 if (left) {
3013 tabl = 0x8cef;
3014 tabr = 0xf731;
3015 } else {
3016 tabl = 0x137f;
3017 tabr = 0xfec8;
3019 break;
3020 case 32:
3021 imask = 0x4;
3022 shift = 0;
3023 omask = 0x3;
3024 if (left) {
3025 tabl = (2 << 2) | 3;
3026 tabr = (3 << 2) | 1;
3027 } else {
3028 tabl = (1 << 2) | 3;
3029 tabr = (3 << 2) | 2;
3031 break;
3032 default:
3033 abort();
3036 lo1 = tcg_temp_new();
3037 lo2 = tcg_temp_new();
3038 tcg_gen_andi_tl(lo1, s1, imask);
3039 tcg_gen_andi_tl(lo2, s2, imask);
3040 tcg_gen_shli_tl(lo1, lo1, shift);
3041 tcg_gen_shli_tl(lo2, lo2, shift);
3043 t1 = tcg_const_tl(tabl);
3044 t2 = tcg_const_tl(tabr);
3045 tcg_gen_shr_tl(lo1, t1, lo1);
3046 tcg_gen_shr_tl(lo2, t2, lo2);
3047 tcg_gen_andi_tl(dst, lo1, omask);
3048 tcg_gen_andi_tl(lo2, lo2, omask);
3050 amask = -8;
3051 if (AM_CHECK(dc)) {
3052 amask &= 0xffffffffULL;
3054 tcg_gen_andi_tl(s1, s1, amask);
3055 tcg_gen_andi_tl(s2, s2, amask);
3057 /* We want to compute
3058 dst = (s1 == s2 ? lo1 : lo1 & lo2).
3059 We've already done dst = lo1, so this reduces to
3060 dst &= (s1 == s2 ? -1 : lo2)
3061 Which we perform by
3062 lo2 |= -(s1 == s2)
3063 dst &= lo2
3065 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
3066 tcg_gen_neg_tl(t1, t1);
3067 tcg_gen_or_tl(lo2, lo2, t1);
3068 tcg_gen_and_tl(dst, dst, lo2);
3070 tcg_temp_free(lo1);
3071 tcg_temp_free(lo2);
3072 tcg_temp_free(t1);
3073 tcg_temp_free(t2);
3076 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
3078 TCGv tmp = tcg_temp_new();
3080 tcg_gen_add_tl(tmp, s1, s2);
3081 tcg_gen_andi_tl(dst, tmp, -8);
3082 if (left) {
3083 tcg_gen_neg_tl(tmp, tmp);
3085 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
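/* The low 3 bits deposited into GSR.align select the later faligndata
   byte shift; the negation above implements the "left" (alignaddrl)
   variant (descriptive note). */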
3087 tcg_temp_free(tmp);
3090 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
3092 TCGv t1, t2, shift;
3094 t1 = tcg_temp_new();
3095 t2 = tcg_temp_new();
3096 shift = tcg_temp_new();
3098 tcg_gen_andi_tl(shift, gsr, 7);
3099 tcg_gen_shli_tl(shift, shift, 3);
3100 tcg_gen_shl_tl(t1, s1, shift);
3102 /* A shift of 64 does not produce 0 in TCG. Divide this into a
3103 shift of (up to 63) followed by a constant shift of 1. */
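/* Equivalently: t2 = s2 >> (64 - shift), computed as
   (s2 >> (shift ^ 63)) >> 1. Since shift is a multiple of 8 here,
   shift ^ 63 == 63 - shift, and the result stays well defined even
   when shift == 0 (illustrative). */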
3104 tcg_gen_xori_tl(shift, shift, 63);
3105 tcg_gen_shr_tl(t2, s2, shift);
3106 tcg_gen_shri_tl(t2, t2, 1);
3108 tcg_gen_or_tl(dst, t1, t2);
3110 tcg_temp_free(t1);
3111 tcg_temp_free(t2);
3112 tcg_temp_free(shift);
3114 #endif
3116 #define CHECK_IU_FEATURE(dc, FEATURE) \
3117 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3118 goto illegal_insn;
3119 #define CHECK_FPU_FEATURE(dc, FEATURE) \
3120 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3121 goto nfpu_insn;
3123 /* before an instruction, dc->pc must be static */
3124 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
3126 unsigned int opc, rs1, rs2, rd;
3127 TCGv cpu_src1, cpu_src2;
3128 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
3129 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
3130 target_long simm;
3132 opc = GET_FIELD(insn, 0, 1);
3133 rd = GET_FIELD(insn, 2, 6);
3135 switch (opc) {
3136 case 0: /* branches/sethi */
3138 unsigned int xop = GET_FIELD(insn, 7, 9);
3139 int32_t target;
3140 switch (xop) {
3141 #ifdef TARGET_SPARC64
3142 case 0x1: /* V9 BPcc */
3144 int cc;
3146 target = GET_FIELD_SP(insn, 0, 18);
3147 target = sign_extend(target, 19);
3148 target <<= 2;
3149 cc = GET_FIELD_SP(insn, 20, 21);
3150 if (cc == 0)
3151 do_branch(dc, target, insn, 0);
3152 else if (cc == 2)
3153 do_branch(dc, target, insn, 1);
3154 else
3155 goto illegal_insn;
3156 goto jmp_insn;
3158 case 0x3: /* V9 BPr */
3160 target = GET_FIELD_SP(insn, 0, 13) |
3161 (GET_FIELD_SP(insn, 20, 21) << 14);
3162 target = sign_extend(target, 16);
3163 target <<= 2;
3164 cpu_src1 = get_src1(dc, insn);
3165 do_branch_reg(dc, target, insn, cpu_src1);
3166 goto jmp_insn;
3168 case 0x5: /* V9 FBPcc */
3170 int cc = GET_FIELD_SP(insn, 20, 21);
3171 if (gen_trap_ifnofpu(dc)) {
3172 goto jmp_insn;
3174 target = GET_FIELD_SP(insn, 0, 18);
3175 target = sign_extend(target, 19);
3176 target <<= 2;
3177 do_fbranch(dc, target, insn, cc);
3178 goto jmp_insn;
3180 #else
3181 case 0x7: /* CBN+x */
3183 goto ncp_insn;
3185 #endif
3186 case 0x2: /* BN+x */
3188 target = GET_FIELD(insn, 10, 31);
3189 target = sign_extend(target, 22);
3190 target <<= 2;
3191 do_branch(dc, target, insn, 0);
3192 goto jmp_insn;
3194 case 0x6: /* FBN+x */
3196 if (gen_trap_ifnofpu(dc)) {
3197 goto jmp_insn;
3199 target = GET_FIELD(insn, 10, 31);
3200 target = sign_extend(target, 22);
3201 target <<= 2;
3202 do_fbranch(dc, target, insn, 0);
3203 goto jmp_insn;
3205 case 0x4: /* SETHI */
3206 /* Special-case %g0 because that's the canonical nop. */
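/* e.g. "sethi %hi(0xdeadbeef), %o0" leaves 0xdeadbc00 in %o0: the
   22-bit immediate from insn[21:0] is placed in bits 31:10
   (illustrative). */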
3207 if (rd) {
3208 uint32_t value = GET_FIELD(insn, 10, 31);
3209 TCGv t = gen_dest_gpr(dc, rd);
3210 tcg_gen_movi_tl(t, value << 10);
3211 gen_store_gpr(dc, rd, t);
3213 break;
3214 case 0x0: /* UNIMPL */
3215 default:
3216 goto illegal_insn;
3218 break;
3220 break;
3221 case 1: /*CALL*/
3223 target_long target = GET_FIELDs(insn, 2, 31) << 2;
3224 TCGv o7 = gen_dest_gpr(dc, 15);
3226 tcg_gen_movi_tl(o7, dc->pc);
3227 gen_store_gpr(dc, 15, o7);
3228 target += dc->pc;
3229 gen_mov_pc_npc(dc);
3230 #ifdef TARGET_SPARC64
3231 if (unlikely(AM_CHECK(dc))) {
3232 target &= 0xffffffffULL;
3234 #endif
3235 dc->npc = target;
3237 goto jmp_insn;
3238 case 2: /* FPU & Logical Operations */
3240 unsigned int xop = GET_FIELD(insn, 7, 12);
3241 TCGv cpu_dst = get_temp_tl(dc);
3242 TCGv cpu_tmp0;
3244 if (xop == 0x3a) { /* generate trap */
3245 int cond = GET_FIELD(insn, 3, 6);
3246 TCGv_i32 trap;
3247 TCGLabel *l1 = NULL;
3248 int mask;
3250 if (cond == 0) {
3251 /* Trap never. */
3252 break;
3255 save_state(dc);
3257 if (cond != 8) {
3258 /* Conditional trap. */
3259 DisasCompare cmp;
3260 #ifdef TARGET_SPARC64
3261 /* V9 icc/xcc */
3262 int cc = GET_FIELD_SP(insn, 11, 12);
3263 if (cc == 0) {
3264 gen_compare(&cmp, 0, cond, dc);
3265 } else if (cc == 2) {
3266 gen_compare(&cmp, 1, cond, dc);
3267 } else {
3268 goto illegal_insn;
3270 #else
3271 gen_compare(&cmp, 0, cond, dc);
3272 #endif
3273 l1 = gen_new_label();
3274 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3275 cmp.c1, cmp.c2, l1);
3276 free_compare(&cmp);
3279 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3280 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
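/* Hypervisor-capable CPUs in supervisor mode take the full 8-bit
   software trap number; plain V8 masks it to 7 bits (cf. the
   UA2005_HTRAP_MASK / V8_TRAP_MASK definitions; descriptive note). */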
3282 /* Don't use the normal temporaries, as they may well have
3283 gone out of scope with the branch above. While we're
3284 doing that we might as well pre-truncate to 32-bit. */
3285 trap = tcg_temp_new_i32();
3287 rs1 = GET_FIELD_SP(insn, 14, 18);
3288 if (IS_IMM) {
3289 rs2 = GET_FIELD_SP(insn, 0, 6);
3290 if (rs1 == 0) {
3291 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3292 /* Signal that the trap value is fully constant. */
3293 mask = 0;
3294 } else {
3295 TCGv t1 = gen_load_gpr(dc, rs1);
3296 tcg_gen_trunc_tl_i32(trap, t1);
3297 tcg_gen_addi_i32(trap, trap, rs2);
3299 } else {
3300 TCGv t1, t2;
3301 rs2 = GET_FIELD_SP(insn, 0, 4);
3302 t1 = gen_load_gpr(dc, rs1);
3303 t2 = gen_load_gpr(dc, rs2);
3304 tcg_gen_add_tl(t1, t1, t2);
3305 tcg_gen_trunc_tl_i32(trap, t1);
3307 if (mask != 0) {
3308 tcg_gen_andi_i32(trap, trap, mask);
3309 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3312 gen_helper_raise_exception(cpu_env, trap);
3313 tcg_temp_free_i32(trap);
3315 if (cond == 8) {
3316 /* An unconditional trap ends the TB. */
3317 dc->is_br = 1;
3318 goto jmp_insn;
3319 } else {
3320 /* A conditional trap falls through to the next insn. */
3321 gen_set_label(l1);
3322 break;
3324 } else if (xop == 0x28) {
3325 rs1 = GET_FIELD(insn, 13, 17);
3326 switch(rs1) {
3327 case 0: /* rdy */
3328 #ifndef TARGET_SPARC64
3329 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3330 manual, rdy on the microSPARC
3331 II */
3332 case 0x0f: /* stbar in the SPARCv8 manual,
3333 rdy on the microSPARC II */
3334 case 0x10 ... 0x1f: /* implementation-dependent in the
3335 SPARCv8 manual, rdy on the
3336 microSPARC II */
3337 /* Read Asr17 */
3338 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3339 TCGv t = gen_dest_gpr(dc, rd);
3340 /* Asr17 for a Leon3 monoprocessor: bit 8 set, NWINDOWS - 1 in the low bits */
3341 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3342 gen_store_gpr(dc, rd, t);
3343 break;
3345 #endif
3346 gen_store_gpr(dc, rd, cpu_y);
3347 break;
3348 #ifdef TARGET_SPARC64
3349 case 0x2: /* V9 rdccr */
3350 update_psr(dc);
3351 gen_helper_rdccr(cpu_dst, cpu_env);
3352 gen_store_gpr(dc, rd, cpu_dst);
3353 break;
3354 case 0x3: /* V9 rdasi */
3355 tcg_gen_movi_tl(cpu_dst, dc->asi);
3356 gen_store_gpr(dc, rd, cpu_dst);
3357 break;
3358 case 0x4: /* V9 rdtick */
3360 TCGv_ptr r_tickptr;
3361 TCGv_i32 r_const;
3363 r_tickptr = tcg_temp_new_ptr();
3364 r_const = tcg_const_i32(dc->mem_idx);
3365 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3366 offsetof(CPUSPARCState, tick));
3367 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3368 r_const);
3369 tcg_temp_free_ptr(r_tickptr);
3370 tcg_temp_free_i32(r_const);
3371 gen_store_gpr(dc, rd, cpu_dst);
3373 break;
3374 case 0x5: /* V9 rdpc */
3376 TCGv t = gen_dest_gpr(dc, rd);
3377 if (unlikely(AM_CHECK(dc))) {
3378 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3379 } else {
3380 tcg_gen_movi_tl(t, dc->pc);
3382 gen_store_gpr(dc, rd, t);
3384 break;
3385 case 0x6: /* V9 rdfprs */
3386 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3387 gen_store_gpr(dc, rd, cpu_dst);
3388 break;
3389 case 0xf: /* V9 membar */
3390 break; /* no effect */
3391 case 0x13: /* Graphics Status */
3392 if (gen_trap_ifnofpu(dc)) {
3393 goto jmp_insn;
3395 gen_store_gpr(dc, rd, cpu_gsr);
3396 break;
3397 case 0x16: /* Softint */
3398 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3399 offsetof(CPUSPARCState, softint));
3400 gen_store_gpr(dc, rd, cpu_dst);
3401 break;
3402 case 0x17: /* Tick compare */
3403 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3404 break;
3405 case 0x18: /* System tick */
3407 TCGv_ptr r_tickptr;
3408 TCGv_i32 r_const;
3410 r_tickptr = tcg_temp_new_ptr();
3411 r_const = tcg_const_i32(dc->mem_idx);
3412 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3413 offsetof(CPUSPARCState, stick));
3414 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3415 r_const);
3416 tcg_temp_free_ptr(r_tickptr);
3417 tcg_temp_free_i32(r_const);
3418 gen_store_gpr(dc, rd, cpu_dst);
3420 break;
3421 case 0x19: /* System tick compare */
3422 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3423 break;
3424 case 0x10: /* Performance Control */
3425 case 0x11: /* Performance Instrumentation Counter */
3426 case 0x12: /* Dispatch Control */
3427 case 0x14: /* Softint set, WO */
3428 case 0x15: /* Softint clear, WO */
3429 #endif
3430 default:
3431 goto illegal_insn;
3433 #if !defined(CONFIG_USER_ONLY)
3434 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3435 #ifndef TARGET_SPARC64
3436 if (!supervisor(dc)) {
3437 goto priv_insn;
3439 update_psr(dc);
3440 gen_helper_rdpsr(cpu_dst, cpu_env);
3441 #else
3442 CHECK_IU_FEATURE(dc, HYPV);
3443 if (!hypervisor(dc))
3444 goto priv_insn;
3445 rs1 = GET_FIELD(insn, 13, 17);
3446 switch (rs1) {
3447 case 0: // hpstate
3448 // gen_op_rdhpstate();
3449 break;
3450 case 1: // htstate
3451 // gen_op_rdhtstate();
3452 break;
3453 case 3: // hintp
3454 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3455 break;
3456 case 5: // htba
3457 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3458 break;
3459 case 6: // hver
3460 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3461 break;
3462 case 31: // hstick_cmpr
3463 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3464 break;
3465 default:
3466 goto illegal_insn;
3468 #endif
3469 gen_store_gpr(dc, rd, cpu_dst);
3470 break;
3471 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3472 if (!supervisor(dc)) {
3473 goto priv_insn;
3475 cpu_tmp0 = get_temp_tl(dc);
3476 #ifdef TARGET_SPARC64
3477 rs1 = GET_FIELD(insn, 13, 17);
3478 switch (rs1) {
3479 case 0: // tpc
3481 TCGv_ptr r_tsptr;
3483 r_tsptr = tcg_temp_new_ptr();
3484 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3485 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3486 offsetof(trap_state, tpc));
3487 tcg_temp_free_ptr(r_tsptr);
3489 break;
3490 case 1: // tnpc
3492 TCGv_ptr r_tsptr;
3494 r_tsptr = tcg_temp_new_ptr();
3495 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3496 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3497 offsetof(trap_state, tnpc));
3498 tcg_temp_free_ptr(r_tsptr);
3500 break;
3501 case 2: // tstate
3503 TCGv_ptr r_tsptr;
3505 r_tsptr = tcg_temp_new_ptr();
3506 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3507 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3508 offsetof(trap_state, tstate));
3509 tcg_temp_free_ptr(r_tsptr);
3511 break;
3512 case 3: // tt
3514 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3516 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3517 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3518 offsetof(trap_state, tt));
3519 tcg_temp_free_ptr(r_tsptr);
3521 break;
3522 case 4: // tick
3524 TCGv_ptr r_tickptr;
3525 TCGv_i32 r_const;
3527 r_tickptr = tcg_temp_new_ptr();
3528 r_const = tcg_const_i32(dc->mem_idx);
3529 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3530 offsetof(CPUSPARCState, tick));
3531 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3532 r_tickptr, r_const);
3533 tcg_temp_free_ptr(r_tickptr);
3534 tcg_temp_free_i32(r_const);
3536 break;
3537 case 5: // tba
3538 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3539 break;
3540 case 6: // pstate
3541 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3542 offsetof(CPUSPARCState, pstate));
3543 break;
3544 case 7: // tl
3545 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3546 offsetof(CPUSPARCState, tl));
3547 break;
3548 case 8: // pil
3549 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3550 offsetof(CPUSPARCState, psrpil));
3551 break;
3552 case 9: // cwp
3553 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3554 break;
3555 case 10: // cansave
3556 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3557 offsetof(CPUSPARCState, cansave));
3558 break;
3559 case 11: // canrestore
3560 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3561 offsetof(CPUSPARCState, canrestore));
3562 break;
3563 case 12: // cleanwin
3564 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3565 offsetof(CPUSPARCState, cleanwin));
3566 break;
3567 case 13: // otherwin
3568 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3569 offsetof(CPUSPARCState, otherwin));
3570 break;
3571 case 14: // wstate
3572 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3573 offsetof(CPUSPARCState, wstate));
3574 break;
3575 case 16: // UA2005 gl
3576 CHECK_IU_FEATURE(dc, GL);
3577 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3578 offsetof(CPUSPARCState, gl));
3579 break;
3580 case 26: // UA2005 strand status
3581 CHECK_IU_FEATURE(dc, HYPV);
3582 if (!hypervisor(dc))
3583 goto priv_insn;
3584 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3585 break;
3586 case 31: // ver
3587 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3588 break;
3589 case 15: // fq
3590 default:
3591 goto illegal_insn;
3593 #else
3594 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3595 #endif
3596 gen_store_gpr(dc, rd, cpu_tmp0);
3597 break;
3598 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3599 #ifdef TARGET_SPARC64
3600 gen_helper_flushw(cpu_env);
3601 #else
3602 if (!supervisor(dc))
3603 goto priv_insn;
3604 gen_store_gpr(dc, rd, cpu_tbr);
3605 #endif
3606 break;
3607 #endif
3608 } else if (xop == 0x34) { /* FPop1: FPU arithmetic & conversions */
3609 if (gen_trap_ifnofpu(dc)) {
3610 goto jmp_insn;
3612 gen_op_clear_ieee_excp_and_FTT();
3613 rs1 = GET_FIELD(insn, 13, 17);
3614 rs2 = GET_FIELD(insn, 27, 31);
3615 xop = GET_FIELD(insn, 18, 26);
3617 switch (xop) {
3618 case 0x1: /* fmovs */
3619 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3620 gen_store_fpr_F(dc, rd, cpu_src1_32);
3621 break;
3622 case 0x5: /* fnegs */
3623 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3624 break;
3625 case 0x9: /* fabss */
3626 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3627 break;
3628 case 0x29: /* fsqrts */
3629 CHECK_FPU_FEATURE(dc, FSQRT);
3630 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3631 break;
3632 case 0x2a: /* fsqrtd */
3633 CHECK_FPU_FEATURE(dc, FSQRT);
3634 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3635 break;
3636 case 0x2b: /* fsqrtq */
3637 CHECK_FPU_FEATURE(dc, FLOAT128);
3638 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3639 break;
3640 case 0x41: /* fadds */
3641 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3642 break;
3643 case 0x42: /* faddd */
3644 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3645 break;
3646 case 0x43: /* faddq */
3647 CHECK_FPU_FEATURE(dc, FLOAT128);
3648 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3649 break;
3650 case 0x45: /* fsubs */
3651 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3652 break;
3653 case 0x46: /* fsubd */
3654 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3655 break;
3656 case 0x47: /* fsubq */
3657 CHECK_FPU_FEATURE(dc, FLOAT128);
3658 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3659 break;
3660 case 0x49: /* fmuls */
3661 CHECK_FPU_FEATURE(dc, FMUL);
3662 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3663 break;
3664 case 0x4a: /* fmuld */
3665 CHECK_FPU_FEATURE(dc, FMUL);
3666 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3667 break;
3668 case 0x4b: /* fmulq */
3669 CHECK_FPU_FEATURE(dc, FLOAT128);
3670 CHECK_FPU_FEATURE(dc, FMUL);
3671 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3672 break;
3673 case 0x4d: /* fdivs */
3674 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3675 break;
3676 case 0x4e: /* fdivd */
3677 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3678 break;
3679 case 0x4f: /* fdivq */
3680 CHECK_FPU_FEATURE(dc, FLOAT128);
3681 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3682 break;
3683 case 0x69: /* fsmuld */
3684 CHECK_FPU_FEATURE(dc, FSMULD);
3685 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3686 break;
3687 case 0x6e: /* fdmulq */
3688 CHECK_FPU_FEATURE(dc, FLOAT128);
3689 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3690 break;
3691 case 0xc4: /* fitos */
3692 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3693 break;
3694 case 0xc6: /* fdtos */
3695 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3696 break;
3697 case 0xc7: /* fqtos */
3698 CHECK_FPU_FEATURE(dc, FLOAT128);
3699 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3700 break;
3701 case 0xc8: /* fitod */
3702 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3703 break;
3704 case 0xc9: /* fstod */
3705 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3706 break;
3707 case 0xcb: /* fqtod */
3708 CHECK_FPU_FEATURE(dc, FLOAT128);
3709 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3710 break;
3711 case 0xcc: /* fitoq */
3712 CHECK_FPU_FEATURE(dc, FLOAT128);
3713 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3714 break;
3715 case 0xcd: /* fstoq */
3716 CHECK_FPU_FEATURE(dc, FLOAT128);
3717 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3718 break;
3719 case 0xce: /* fdtoq */
3720 CHECK_FPU_FEATURE(dc, FLOAT128);
3721 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3722 break;
3723 case 0xd1: /* fstoi */
3724 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3725 break;
3726 case 0xd2: /* fdtoi */
3727 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3728 break;
3729 case 0xd3: /* fqtoi */
3730 CHECK_FPU_FEATURE(dc, FLOAT128);
3731 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3732 break;
3733 #ifdef TARGET_SPARC64
3734 case 0x2: /* V9 fmovd */
3735 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3736 gen_store_fpr_D(dc, rd, cpu_src1_64);
3737 break;
3738 case 0x3: /* V9 fmovq */
3739 CHECK_FPU_FEATURE(dc, FLOAT128);
3740 gen_move_Q(dc, rd, rs2);
3741 break;
3742 case 0x6: /* V9 fnegd */
3743 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3744 break;
3745 case 0x7: /* V9 fnegq */
3746 CHECK_FPU_FEATURE(dc, FLOAT128);
3747 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3748 break;
3749 case 0xa: /* V9 fabsd */
3750 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3751 break;
3752 case 0xb: /* V9 fabsq */
3753 CHECK_FPU_FEATURE(dc, FLOAT128);
3754 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3755 break;
3756 case 0x81: /* V9 fstox */
3757 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3758 break;
3759 case 0x82: /* V9 fdtox */
3760 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3761 break;
3762 case 0x83: /* V9 fqtox */
3763 CHECK_FPU_FEATURE(dc, FLOAT128);
3764 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3765 break;
3766 case 0x84: /* V9 fxtos */
3767 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3768 break;
3769 case 0x88: /* V9 fxtod */
3770 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3771 break;
3772 case 0x8c: /* V9 fxtoq */
3773 CHECK_FPU_FEATURE(dc, FLOAT128);
3774 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3775 break;
3776 #endif
3777 default:
3778 goto illegal_insn;
3780 } else if (xop == 0x35) { /* FPop2: FPU compares & conditional moves */
3781 #ifdef TARGET_SPARC64
3782 int cond;
3783 #endif
3784 if (gen_trap_ifnofpu(dc)) {
3785 goto jmp_insn;
3787 gen_op_clear_ieee_excp_and_FTT();
3788 rs1 = GET_FIELD(insn, 13, 17);
3789 rs2 = GET_FIELD(insn, 27, 31);
3790 xop = GET_FIELD(insn, 18, 26);
3792 #ifdef TARGET_SPARC64
3793 #define FMOVR(sz) \
3794 do { \
3795 DisasCompare cmp; \
3796 cond = GET_FIELD_SP(insn, 10, 12); \
3797 cpu_src1 = get_src1(dc, insn); \
3798 gen_compare_reg(&cmp, cond, cpu_src1); \
3799 gen_fmov##sz(dc, &cmp, rd, rs2); \
3800 free_compare(&cmp); \
3801 } while (0)
3803 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3804 FMOVR(s);
3805 break;
3806 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3807 FMOVR(d);
3808 break;
3809 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3810 CHECK_FPU_FEATURE(dc, FLOAT128);
3811 FMOVR(q);
3812 break;
3814 #undef FMOVR
3815 #endif
3816 switch (xop) {
3817 #ifdef TARGET_SPARC64
3818 #define FMOVCC(fcc, sz) \
3819 do { \
3820 DisasCompare cmp; \
3821 cond = GET_FIELD_SP(insn, 14, 17); \
3822 gen_fcompare(&cmp, fcc, cond); \
3823 gen_fmov##sz(dc, &cmp, rd, rs2); \
3824 free_compare(&cmp); \
3825 } while (0)
3827 case 0x001: /* V9 fmovscc %fcc0 */
3828 FMOVCC(0, s);
3829 break;
3830 case 0x002: /* V9 fmovdcc %fcc0 */
3831 FMOVCC(0, d);
3832 break;
3833 case 0x003: /* V9 fmovqcc %fcc0 */
3834 CHECK_FPU_FEATURE(dc, FLOAT128);
3835 FMOVCC(0, q);
3836 break;
3837 case 0x041: /* V9 fmovscc %fcc1 */
3838 FMOVCC(1, s);
3839 break;
3840 case 0x042: /* V9 fmovdcc %fcc1 */
3841 FMOVCC(1, d);
3842 break;
3843 case 0x043: /* V9 fmovqcc %fcc1 */
3844 CHECK_FPU_FEATURE(dc, FLOAT128);
3845 FMOVCC(1, q);
3846 break;
3847 case 0x081: /* V9 fmovscc %fcc2 */
3848 FMOVCC(2, s);
3849 break;
3850 case 0x082: /* V9 fmovdcc %fcc2 */
3851 FMOVCC(2, d);
3852 break;
3853 case 0x083: /* V9 fmovqcc %fcc2 */
3854 CHECK_FPU_FEATURE(dc, FLOAT128);
3855 FMOVCC(2, q);
3856 break;
3857 case 0x0c1: /* V9 fmovscc %fcc3 */
3858 FMOVCC(3, s);
3859 break;
3860 case 0x0c2: /* V9 fmovdcc %fcc3 */
3861 FMOVCC(3, d);
3862 break;
3863 case 0x0c3: /* V9 fmovqcc %fcc3 */
3864 CHECK_FPU_FEATURE(dc, FLOAT128);
3865 FMOVCC(3, q);
3866 break;
3867 #undef FMOVCC
3868 #define FMOVCC(xcc, sz) \
3869 do { \
3870 DisasCompare cmp; \
3871 cond = GET_FIELD_SP(insn, 14, 17); \
3872 gen_compare(&cmp, xcc, cond, dc); \
3873 gen_fmov##sz(dc, &cmp, rd, rs2); \
3874 free_compare(&cmp); \
3875 } while (0)
3877 case 0x101: /* V9 fmovscc %icc */
3878 FMOVCC(0, s);
3879 break;
3880 case 0x102: /* V9 fmovdcc %icc */
3881 FMOVCC(0, d);
3882 break;
3883 case 0x103: /* V9 fmovqcc %icc */
3884 CHECK_FPU_FEATURE(dc, FLOAT128);
3885 FMOVCC(0, q);
3886 break;
3887 case 0x181: /* V9 fmovscc %xcc */
3888 FMOVCC(1, s);
3889 break;
3890 case 0x182: /* V9 fmovdcc %xcc */
3891 FMOVCC(1, d);
3892 break;
3893 case 0x183: /* V9 fmovqcc %xcc */
3894 CHECK_FPU_FEATURE(dc, FLOAT128);
3895 FMOVCC(1, q);
3896 break;
3897 #undef FMOVCC
3898 #endif
3899 case 0x51: /* fcmps, V9 %fcc */
3900 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3901 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3902 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3903 break;
3904 case 0x52: /* fcmpd, V9 %fcc */
3905 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3906 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3907 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3908 break;
3909 case 0x53: /* fcmpq, V9 %fcc */
3910 CHECK_FPU_FEATURE(dc, FLOAT128);
3911 gen_op_load_fpr_QT0(QFPREG(rs1));
3912 gen_op_load_fpr_QT1(QFPREG(rs2));
3913 gen_op_fcmpq(rd & 3);
3914 break;
3915 case 0x55: /* fcmpes, V9 %fcc */
3916 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3917 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3918 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3919 break;
3920 case 0x56: /* fcmped, V9 %fcc */
3921 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3922 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3923 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3924 break;
3925 case 0x57: /* fcmpeq, V9 %fcc */
3926 CHECK_FPU_FEATURE(dc, FLOAT128);
3927 gen_op_load_fpr_QT0(QFPREG(rs1));
3928 gen_op_load_fpr_QT1(QFPREG(rs2));
3929 gen_op_fcmpeq(rd & 3);
3930 break;
3931 default:
3932 goto illegal_insn;
3934 } else if (xop == 0x2) {
3935 TCGv dst = gen_dest_gpr(dc, rd);
3936 rs1 = GET_FIELD(insn, 13, 17);
3937 if (rs1 == 0) {
3938 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3939 if (IS_IMM) { /* immediate */
3940 simm = GET_FIELDs(insn, 19, 31);
3941 tcg_gen_movi_tl(dst, simm);
3942 gen_store_gpr(dc, rd, dst);
3943 } else { /* register */
3944 rs2 = GET_FIELD(insn, 27, 31);
3945 if (rs2 == 0) {
3946 tcg_gen_movi_tl(dst, 0);
3947 gen_store_gpr(dc, rd, dst);
3948 } else {
3949 cpu_src2 = gen_load_gpr(dc, rs2);
3950 gen_store_gpr(dc, rd, cpu_src2);
3953 } else {
3954 cpu_src1 = get_src1(dc, insn);
3955 if (IS_IMM) { /* immediate */
3956 simm = GET_FIELDs(insn, 19, 31);
3957 tcg_gen_ori_tl(dst, cpu_src1, simm);
3958 gen_store_gpr(dc, rd, dst);
3959 } else { /* register */
3960 rs2 = GET_FIELD(insn, 27, 31);
3961 if (rs2 == 0) {
3962 /* mov shortcut: or x, %g0, y -> mov x, y */
3963 gen_store_gpr(dc, rd, cpu_src1);
3964 } else {
3965 cpu_src2 = gen_load_gpr(dc, rs2);
3966 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3967 gen_store_gpr(dc, rd, dst);
3971 #ifdef TARGET_SPARC64
3972 } else if (xop == 0x25) { /* sll, V9 sllx */
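/* Bit 12 of the instruction selects the 64-bit form (sllx/srlx/srax,
   6-bit shift count) over the 32-bit form (5-bit count)
   (descriptive note). */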
3973 cpu_src1 = get_src1(dc, insn);
3974 if (IS_IMM) { /* immediate */
3975 simm = GET_FIELDs(insn, 20, 31);
3976 if (insn & (1 << 12)) {
3977 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3978 } else {
3979 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3981 } else { /* register */
3982 rs2 = GET_FIELD(insn, 27, 31);
3983 cpu_src2 = gen_load_gpr(dc, rs2);
3984 cpu_tmp0 = get_temp_tl(dc);
3985 if (insn & (1 << 12)) {
3986 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3987 } else {
3988 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3990 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3992 gen_store_gpr(dc, rd, cpu_dst);
3993 } else if (xop == 0x26) { /* srl, V9 srlx */
3994 cpu_src1 = get_src1(dc, insn);
3995 if (IS_IMM) { /* immediate */
3996 simm = GET_FIELDs(insn, 20, 31);
3997 if (insn & (1 << 12)) {
3998 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3999 } else {
4000 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4001 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
4003 } else { /* register */
4004 rs2 = GET_FIELD(insn, 27, 31);
4005 cpu_src2 = gen_load_gpr(dc, rs2);
4006 cpu_tmp0 = get_temp_tl(dc);
4007 if (insn & (1 << 12)) {
4008 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4009 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
4010 } else {
4011 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4012 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4013 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
4016 gen_store_gpr(dc, rd, cpu_dst);
4017 } else if (xop == 0x27) { /* sra, V9 srax */
4018 cpu_src1 = get_src1(dc, insn);
4019 if (IS_IMM) { /* immediate */
4020 simm = GET_FIELDs(insn, 20, 31);
4021 if (insn & (1 << 12)) {
4022 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
4023 } else {
4024 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4025 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
4027 } else { /* register */
4028 rs2 = GET_FIELD(insn, 27, 31);
4029 cpu_src2 = gen_load_gpr(dc, rs2);
4030 cpu_tmp0 = get_temp_tl(dc);
4031 if (insn & (1 << 12)) {
4032 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4033 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
4034 } else {
4035 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4036 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4037 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
4040 gen_store_gpr(dc, rd, cpu_dst);
4041 #endif
4042 } else if (xop < 0x36) {
4043 if (xop < 0x20) {
4044 cpu_src1 = get_src1(dc, insn);
4045 cpu_src2 = get_src2(dc, insn);
4046 switch (xop & ~0x10) {
4047 case 0x0: /* add */
4048 if (xop & 0x10) {
4049 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4050 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4051 dc->cc_op = CC_OP_ADD;
4052 } else {
4053 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4055 break;
4056 case 0x1: /* and */
4057 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
4058 if (xop & 0x10) {
4059 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4060 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4061 dc->cc_op = CC_OP_LOGIC;
4063 break;
4064 case 0x2: /* or */
4065 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
4066 if (xop & 0x10) {
4067 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4068 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4069 dc->cc_op = CC_OP_LOGIC;
4071 break;
4072 case 0x3: /* xor */
4073 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
4074 if (xop & 0x10) {
4075 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4076 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4077 dc->cc_op = CC_OP_LOGIC;
4079 break;
4080 case 0x4: /* sub */
4081 if (xop & 0x10) {
4082 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4083 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
4084 dc->cc_op = CC_OP_SUB;
4085 } else {
4086 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
4088 break;
4089 case 0x5: /* andn */
4090 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
4091 if (xop & 0x10) {
4092 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4093 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4094 dc->cc_op = CC_OP_LOGIC;
4096 break;
4097 case 0x6: /* orn */
4098 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
4099 if (xop & 0x10) {
4100 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4101 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4102 dc->cc_op = CC_OP_LOGIC;
4104 break;
4105 case 0x7: /* xorn */
4106 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
4107 if (xop & 0x10) {
4108 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4109 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4110 dc->cc_op = CC_OP_LOGIC;
4112 break;
4113 case 0x8: /* addx, V9 addc */
4114 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4115 (xop & 0x10));
4116 break;
4117 #ifdef TARGET_SPARC64
4118 case 0x9: /* V9 mulx */
4119 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4120 break;
4121 #endif
4122 case 0xa: /* umul */
4123 CHECK_IU_FEATURE(dc, MUL);
4124 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4125 if (xop & 0x10) {
4126 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4127 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4128 dc->cc_op = CC_OP_LOGIC;
4130 break;
4131 case 0xb: /* smul */
4132 CHECK_IU_FEATURE(dc, MUL);
4133 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4134 if (xop & 0x10) {
4135 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4136 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4137 dc->cc_op = CC_OP_LOGIC;
4139 break;
4140 case 0xc: /* subx, V9 subc */
4141 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4142 (xop & 0x10));
4143 break;
4144 #ifdef TARGET_SPARC64
4145 case 0xd: /* V9 udivx */
4146 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4147 break;
4148 #endif
4149 case 0xe: /* udiv */
4150 CHECK_IU_FEATURE(dc, DIV);
4151 if (xop & 0x10) {
4152 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4153 cpu_src2);
4154 dc->cc_op = CC_OP_DIV;
4155 } else {
4156 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4157 cpu_src2);
4159 break;
4160 case 0xf: /* sdiv */
4161 CHECK_IU_FEATURE(dc, DIV);
4162 if (xop & 0x10) {
4163 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4164 cpu_src2);
4165 dc->cc_op = CC_OP_DIV;
4166 } else {
4167 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4168 cpu_src2);
4170 break;
4171 default:
4172 goto illegal_insn;
4174 gen_store_gpr(dc, rd, cpu_dst);
4175 } else {
4176 cpu_src1 = get_src1(dc, insn);
4177 cpu_src2 = get_src2(dc, insn);
4178 switch (xop) {
4179 case 0x20: /* taddcc */
4180 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4181 gen_store_gpr(dc, rd, cpu_dst);
4182 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4183 dc->cc_op = CC_OP_TADD;
4184 break;
4185 case 0x21: /* tsubcc */
4186 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4187 gen_store_gpr(dc, rd, cpu_dst);
4188 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4189 dc->cc_op = CC_OP_TSUB;
4190 break;
4191 case 0x22: /* taddcctv */
4192 gen_helper_taddcctv(cpu_dst, cpu_env,
4193 cpu_src1, cpu_src2);
4194 gen_store_gpr(dc, rd, cpu_dst);
4195 dc->cc_op = CC_OP_TADDTV;
4196 break;
4197 case 0x23: /* tsubcctv */
4198 gen_helper_tsubcctv(cpu_dst, cpu_env,
4199 cpu_src1, cpu_src2);
4200 gen_store_gpr(dc, rd, cpu_dst);
4201 dc->cc_op = CC_OP_TSUBTV;
4202 break;
4203 case 0x24: /* mulscc */
4204 update_psr(dc);
4205 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4206 gen_store_gpr(dc, rd, cpu_dst);
4207 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4208 dc->cc_op = CC_OP_ADD;
4209 break;
4210 #ifndef TARGET_SPARC64
4211 case 0x25: /* sll */
4212 if (IS_IMM) { /* immediate */
4213 simm = GET_FIELDs(insn, 20, 31);
4214 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4215 } else { /* register */
4216 cpu_tmp0 = get_temp_tl(dc);
4217 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4218 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4220 gen_store_gpr(dc, rd, cpu_dst);
4221 break;
4222 case 0x26: /* srl */
4223 if (IS_IMM) { /* immediate */
4224 simm = GET_FIELDs(insn, 20, 31);
4225 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4226 } else { /* register */
4227 cpu_tmp0 = get_temp_tl(dc);
4228 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4229 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4231 gen_store_gpr(dc, rd, cpu_dst);
4232 break;
4233 case 0x27: /* sra */
4234 if (IS_IMM) { /* immediate */
4235 simm = GET_FIELDs(insn, 20, 31);
4236 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4237 } else { /* register */
4238 cpu_tmp0 = get_temp_tl(dc);
4239 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4240 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4242 gen_store_gpr(dc, rd, cpu_dst);
4243 break;
4244 #endif
4245 case 0x30:
4247 cpu_tmp0 = get_temp_tl(dc);
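/* All WR* forms compute "rs1 XOR reg_or_imm", per the V8/V9
   definition of the write-state-register instructions
   (descriptive note). */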
4248 switch(rd) {
4249 case 0: /* wry */
4250 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4251 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4252 break;
4253 #ifndef TARGET_SPARC64
4254 case 0x01 ... 0x0f: /* undefined in the
4255 SPARCv8 manual, nop
4256 on the microSPARC
4257 II */
4258 case 0x10 ... 0x1f: /* implementation-dependent
4259 in the SPARCv8
4260 manual, nop on the
4261 microSPARC II */
4262 if ((rd == 0x13) && (dc->def->features &
4263 CPU_FEATURE_POWERDOWN)) {
4264 /* LEON3 power-down */
4265 save_state(dc);
4266 gen_helper_power_down(cpu_env);
4268 break;
4269 #else
4270 case 0x2: /* V9 wrccr */
4271 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4272 gen_helper_wrccr(cpu_env, cpu_tmp0);
4273 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4274 dc->cc_op = CC_OP_FLAGS;
4275 break;
4276 case 0x3: /* V9 wrasi */
4277 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4278 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4279 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4280 offsetof(CPUSPARCState, asi));
4281 /* End TB to notice changed ASI. */
4282 save_state(dc);
4283 gen_op_next_insn();
4284 tcg_gen_exit_tb(0);
4285 dc->is_br = 1;
4286 break;
4287 case 0x6: /* V9 wrfprs */
4288 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4289 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4290 dc->fprs_dirty = 0;
4291 save_state(dc);
4292 gen_op_next_insn();
4293 tcg_gen_exit_tb(0);
4294 dc->is_br = 1;
4295 break;
4296 case 0xf: /* V9 sir, nop if user */
4297 #if !defined(CONFIG_USER_ONLY)
4298 if (supervisor(dc)) {
4299 ; // XXX
4301 #endif
4302 break;
4303 case 0x13: /* Graphics Status */
4304 if (gen_trap_ifnofpu(dc)) {
4305 goto jmp_insn;
4307 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4308 break;
4309 case 0x14: /* Softint set */
4310 if (!supervisor(dc))
4311 goto illegal_insn;
4312 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4313 gen_helper_set_softint(cpu_env, cpu_tmp0);
4314 break;
4315 case 0x15: /* Softint clear */
4316 if (!supervisor(dc))
4317 goto illegal_insn;
4318 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4319 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4320 break;
4321 case 0x16: /* Softint write */
4322 if (!supervisor(dc))
4323 goto illegal_insn;
4324 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4325 gen_helper_write_softint(cpu_env, cpu_tmp0);
4326 break;
4327 case 0x17: /* Tick compare */
4328 #if !defined(CONFIG_USER_ONLY)
4329 if (!supervisor(dc))
4330 goto illegal_insn;
4331 #endif
4333 TCGv_ptr r_tickptr;
4335 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4336 cpu_src2);
4337 r_tickptr = tcg_temp_new_ptr();
4338 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4339 offsetof(CPUSPARCState, tick));
4340 gen_helper_tick_set_limit(r_tickptr,
4341 cpu_tick_cmpr);
4342 tcg_temp_free_ptr(r_tickptr);
4344 break;
4345 case 0x18: /* System tick */
4346 #if !defined(CONFIG_USER_ONLY)
4347 if (!supervisor(dc))
4348 goto illegal_insn;
4349 #endif
4351 TCGv_ptr r_tickptr;
4353 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4354 cpu_src2);
4355 r_tickptr = tcg_temp_new_ptr();
4356 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4357 offsetof(CPUSPARCState, stick));
4358 gen_helper_tick_set_count(r_tickptr,
4359 cpu_tmp0);
4360 tcg_temp_free_ptr(r_tickptr);
4362 break;
4363 case 0x19: /* System tick compare */
4364 #if !defined(CONFIG_USER_ONLY)
4365 if (!supervisor(dc))
4366 goto illegal_insn;
4367 #endif
4369 TCGv_ptr r_tickptr;
4371 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4372 cpu_src2);
4373 r_tickptr = tcg_temp_new_ptr();
4374 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4375 offsetof(CPUSPARCState, stick));
4376 gen_helper_tick_set_limit(r_tickptr,
4377 cpu_stick_cmpr);
4378 tcg_temp_free_ptr(r_tickptr);
4380 break;
4382 case 0x10: /* Performance Control */
4383 case 0x11: /* Performance Instrumentation
4384 Counter */
4385 case 0x12: /* Dispatch Control */
4386 #endif
4387 default:
4388 goto illegal_insn;
4391 break;
4392 #if !defined(CONFIG_USER_ONLY)
4393 case 0x31: /* wrpsr, V9 saved, restored */
4395 if (!supervisor(dc))
4396 goto priv_insn;
4397 #ifdef TARGET_SPARC64
4398 switch (rd) {
4399 case 0:
4400 gen_helper_saved(cpu_env);
4401 break;
4402 case 1:
4403 gen_helper_restored(cpu_env);
4404 break;
4405 case 2: /* UA2005 allclean */
4406 case 3: /* UA2005 otherw */
4407 case 4: /* UA2005 normalw */
4408 case 5: /* UA2005 invalw */
4409 // XXX
4410 default:
4411 goto illegal_insn;
4413 #else
4414 cpu_tmp0 = get_temp_tl(dc);
4415 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4416 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4417 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4418 dc->cc_op = CC_OP_FLAGS;
4419 save_state(dc);
4420 gen_op_next_insn();
4421 tcg_gen_exit_tb(0);
4422 dc->is_br = 1;
4423 #endif
4425 break;
4426 case 0x32: /* wrwim, V9 wrpr */
4428 if (!supervisor(dc))
4429 goto priv_insn;
4430 cpu_tmp0 = get_temp_tl(dc);
4431 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4432 #ifdef TARGET_SPARC64
4433 switch (rd) {
4434 case 0: // tpc
4436 TCGv_ptr r_tsptr;
4438 r_tsptr = tcg_temp_new_ptr();
4439 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4440 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4441 offsetof(trap_state, tpc));
4442 tcg_temp_free_ptr(r_tsptr);
4444 break;
4445 case 1: // tnpc
4447 TCGv_ptr r_tsptr;
4449 r_tsptr = tcg_temp_new_ptr();
4450 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4451 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4452 offsetof(trap_state, tnpc));
4453 tcg_temp_free_ptr(r_tsptr);
4455 break;
4456 case 2: // tstate
4458 TCGv_ptr r_tsptr;
4460 r_tsptr = tcg_temp_new_ptr();
4461 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4462 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4463 offsetof(trap_state,
4464 tstate));
4465 tcg_temp_free_ptr(r_tsptr);
4466 }
4467 break;
4468 case 3: // tt
4469 {
4470 TCGv_ptr r_tsptr;
4472 r_tsptr = tcg_temp_new_ptr();
4473 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4474 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4475 offsetof(trap_state, tt));
4476 tcg_temp_free_ptr(r_tsptr);
4477 }
4478 break;
4479 case 4: // tick
4480 {
4481 TCGv_ptr r_tickptr;
4483 r_tickptr = tcg_temp_new_ptr();
4484 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4485 offsetof(CPUSPARCState, tick));
4486 gen_helper_tick_set_count(r_tickptr,
4487 cpu_tmp0);
4488 tcg_temp_free_ptr(r_tickptr);
4489 }
4490 break;
4491 case 5: // tba
4492 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4493 break;
4494 case 6: // pstate
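/* A PSTATE write may flip privilege, endianness or address masking;
   making npc dynamic below stops translation after this insn so the
   new state takes effect. */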
4495 save_state(dc);
4496 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4497 dc->npc = DYNAMIC_PC;
4498 break;
4499 case 7: // tl
4500 save_state(dc);
4501 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4502 offsetof(CPUSPARCState, tl));
4503 dc->npc = DYNAMIC_PC;
4504 break;
4505 case 8: // pil
4506 gen_helper_wrpil(cpu_env, cpu_tmp0);
4507 break;
4508 case 9: // cwp
4509 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4510 break;
4511 case 10: // cansave
4512 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4513 offsetof(CPUSPARCState,
4514 cansave));
4515 break;
4516 case 11: // canrestore
4517 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4518 offsetof(CPUSPARCState,
4519 canrestore));
4520 break;
4521 case 12: // cleanwin
4522 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4523 offsetof(CPUSPARCState,
4524 cleanwin));
4525 break;
4526 case 13: // otherwin
4527 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4528 offsetof(CPUSPARCState,
4529 otherwin));
4530 break;
4531 case 14: // wstate
4532 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4533 offsetof(CPUSPARCState,
4534 wstate));
4535 break;
4536 case 16: // UA2005 gl
4537 CHECK_IU_FEATURE(dc, GL);
4538 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4539 offsetof(CPUSPARCState, gl));
4540 break;
4541 case 26: // UA2005 strand status
4542 CHECK_IU_FEATURE(dc, HYPV);
4543 if (!hypervisor(dc))
4544 goto priv_insn;
4545 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4546 break;
4547 default:
4548 goto illegal_insn;
4549 }
4550 #else
4551 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
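/* WIM has one valid bit per register window; keep only nwindows bits. */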
4552 if (dc->def->nwindows != 32) {
4553 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4554 (1 << dc->def->nwindows) - 1);
4555 }
4556 #endif
4557 }
4558 break;
4559 case 0x33: /* wrtbr, UA2005 wrhpr */
4560 {
4561 #ifndef TARGET_SPARC64
4562 if (!supervisor(dc))
4563 goto priv_insn;
4564 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4565 #else
4566 CHECK_IU_FEATURE(dc, HYPV);
4567 if (!hypervisor(dc))
4568 goto priv_insn;
4569 cpu_tmp0 = get_temp_tl(dc);
4570 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4571 switch (rd) {
4572 case 0: // hpstate
4573 // XXX gen_op_wrhpstate();
4574 save_state(dc);
4575 gen_op_next_insn();
4576 tcg_gen_exit_tb(0);
4577 dc->is_br = 1;
4578 break;
4579 case 1: // htstate
4580 // XXX gen_op_wrhtstate();
4581 break;
4582 case 3: // hintp
4583 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4584 break;
4585 case 5: // htba
4586 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4587 break;
4588 case 31: // hstick_cmpr
4589 {
4590 TCGv_ptr r_tickptr;
4592 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4593 r_tickptr = tcg_temp_new_ptr();
4594 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4595 offsetof(CPUSPARCState, hstick));
4596 gen_helper_tick_set_limit(r_tickptr,
4597 cpu_hstick_cmpr);
4598 tcg_temp_free_ptr(r_tickptr);
4599 }
4600 break;
4601 case 6: // hver readonly
4602 default:
4603 goto illegal_insn;
4604 }
4605 #endif
4606 }
4607 break;
4608 #endif
4609 #ifdef TARGET_SPARC64
4610 case 0x2c: /* V9 movcc */
4611 {
4612 int cc = GET_FIELD_SP(insn, 11, 12);
4613 int cond = GET_FIELD_SP(insn, 14, 17);
4614 DisasCompare cmp;
4615 TCGv dst;
4617 if (insn & (1 << 18)) {
4618 if (cc == 0) {
4619 gen_compare(&cmp, 0, cond, dc);
4620 } else if (cc == 2) {
4621 gen_compare(&cmp, 1, cond, dc);
4622 } else {
4623 goto illegal_insn;
4624 }
4625 } else {
4626 gen_fcompare(&cmp, cc, cond);
4627 }
4629 /* The get_src2 above loaded the normal 13-bit
4630 immediate field, not the 11-bit field we have
4631 in movcc. But it did handle the reg case. */
4632 if (IS_IMM) {
4633 simm = GET_FIELD_SPs(insn, 0, 10);
4634 tcg_gen_movi_tl(cpu_src2, simm);
4635 }
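/* rd is loaded first so the movcond below can use it as the "false"
   operand: dst = cond(c1, c2) ? cpu_src2 : dst, i.e. rd is left
   unchanged when the condition fails. */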
4637 dst = gen_load_gpr(dc, rd);
4638 tcg_gen_movcond_tl(cmp.cond, dst,
4639 cmp.c1, cmp.c2,
4640 cpu_src2, dst);
4641 free_compare(&cmp);
4642 gen_store_gpr(dc, rd, dst);
4643 break;
4644 }
4645 case 0x2d: /* V9 sdivx */
4646 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4647 gen_store_gpr(dc, rd, cpu_dst);
4648 break;
4649 case 0x2e: /* V9 popc */
4650 gen_helper_popc(cpu_dst, cpu_src2);
4651 gen_store_gpr(dc, rd, cpu_dst);
4652 break;
4653 case 0x2f: /* V9 movr */
4654 {
4655 int cond = GET_FIELD_SP(insn, 10, 12);
4656 DisasCompare cmp;
4657 TCGv dst;
4659 gen_compare_reg(&cmp, cond, cpu_src1);
4661 /* The get_src2 above loaded the normal 13-bit
4662 immediate field, not the 10-bit field we have
4663 in movr. But it did handle the reg case. */
4664 if (IS_IMM) {
4665 simm = GET_FIELD_SPs(insn, 0, 9);
4666 tcg_gen_movi_tl(cpu_src2, simm);
4667 }
4669 dst = gen_load_gpr(dc, rd);
4670 tcg_gen_movcond_tl(cmp.cond, dst,
4671 cmp.c1, cmp.c2,
4672 cpu_src2, dst);
4673 free_compare(&cmp);
4674 gen_store_gpr(dc, rd, dst);
4675 break;
4676 }
4677 #endif
4678 default:
4679 goto illegal_insn;
4680 }
4681 }
4682 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4683 #ifdef TARGET_SPARC64
4684 int opf = GET_FIELD_SP(insn, 5, 13);
4685 rs1 = GET_FIELD(insn, 13, 17);
4686 rs2 = GET_FIELD(insn, 27, 31);
4687 if (gen_trap_ifnofpu(dc)) {
4688 goto jmp_insn;
4689 }
4691 switch (opf) {
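/* For the edge ops below, the trailing gen_edge arguments appear to
   be (width, cc, little): the element width in bits, whether the
   CC-setting form is used, and whether it is the little-endian 'l'
   variant, judging from the opcode names. */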
4692 case 0x000: /* VIS I edge8cc */
4693 CHECK_FPU_FEATURE(dc, VIS1);
4694 cpu_src1 = gen_load_gpr(dc, rs1);
4695 cpu_src2 = gen_load_gpr(dc, rs2);
4696 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4697 gen_store_gpr(dc, rd, cpu_dst);
4698 break;
4699 case 0x001: /* VIS II edge8n */
4700 CHECK_FPU_FEATURE(dc, VIS2);
4701 cpu_src1 = gen_load_gpr(dc, rs1);
4702 cpu_src2 = gen_load_gpr(dc, rs2);
4703 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4704 gen_store_gpr(dc, rd, cpu_dst);
4705 break;
4706 case 0x002: /* VIS I edge8lcc */
4707 CHECK_FPU_FEATURE(dc, VIS1);
4708 cpu_src1 = gen_load_gpr(dc, rs1);
4709 cpu_src2 = gen_load_gpr(dc, rs2);
4710 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4711 gen_store_gpr(dc, rd, cpu_dst);
4712 break;
4713 case 0x003: /* VIS II edge8ln */
4714 CHECK_FPU_FEATURE(dc, VIS2);
4715 cpu_src1 = gen_load_gpr(dc, rs1);
4716 cpu_src2 = gen_load_gpr(dc, rs2);
4717 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4718 gen_store_gpr(dc, rd, cpu_dst);
4719 break;
4720 case 0x004: /* VIS I edge16cc */
4721 CHECK_FPU_FEATURE(dc, VIS1);
4722 cpu_src1 = gen_load_gpr(dc, rs1);
4723 cpu_src2 = gen_load_gpr(dc, rs2);
4724 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4725 gen_store_gpr(dc, rd, cpu_dst);
4726 break;
4727 case 0x005: /* VIS II edge16n */
4728 CHECK_FPU_FEATURE(dc, VIS2);
4729 cpu_src1 = gen_load_gpr(dc, rs1);
4730 cpu_src2 = gen_load_gpr(dc, rs2);
4731 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4732 gen_store_gpr(dc, rd, cpu_dst);
4733 break;
4734 case 0x006: /* VIS I edge16lcc */
4735 CHECK_FPU_FEATURE(dc, VIS1);
4736 cpu_src1 = gen_load_gpr(dc, rs1);
4737 cpu_src2 = gen_load_gpr(dc, rs2);
4738 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4739 gen_store_gpr(dc, rd, cpu_dst);
4740 break;
4741 case 0x007: /* VIS II edge16ln */
4742 CHECK_FPU_FEATURE(dc, VIS2);
4743 cpu_src1 = gen_load_gpr(dc, rs1);
4744 cpu_src2 = gen_load_gpr(dc, rs2);
4745 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4746 gen_store_gpr(dc, rd, cpu_dst);
4747 break;
4748 case 0x008: /* VIS I edge32cc */
4749 CHECK_FPU_FEATURE(dc, VIS1);
4750 cpu_src1 = gen_load_gpr(dc, rs1);
4751 cpu_src2 = gen_load_gpr(dc, rs2);
4752 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4753 gen_store_gpr(dc, rd, cpu_dst);
4754 break;
4755 case 0x009: /* VIS II edge32n */
4756 CHECK_FPU_FEATURE(dc, VIS2);
4757 cpu_src1 = gen_load_gpr(dc, rs1);
4758 cpu_src2 = gen_load_gpr(dc, rs2);
4759 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4760 gen_store_gpr(dc, rd, cpu_dst);
4761 break;
4762 case 0x00a: /* VIS I edge32lcc */
4763 CHECK_FPU_FEATURE(dc, VIS1);
4764 cpu_src1 = gen_load_gpr(dc, rs1);
4765 cpu_src2 = gen_load_gpr(dc, rs2);
4766 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4767 gen_store_gpr(dc, rd, cpu_dst);
4768 break;
4769 case 0x00b: /* VIS II edge32ln */
4770 CHECK_FPU_FEATURE(dc, VIS2);
4771 cpu_src1 = gen_load_gpr(dc, rs1);
4772 cpu_src2 = gen_load_gpr(dc, rs2);
4773 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4774 gen_store_gpr(dc, rd, cpu_dst);
4775 break;
4776 case 0x010: /* VIS I array8 */
4777 CHECK_FPU_FEATURE(dc, VIS1);
4778 cpu_src1 = gen_load_gpr(dc, rs1);
4779 cpu_src2 = gen_load_gpr(dc, rs2);
4780 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4781 gen_store_gpr(dc, rd, cpu_dst);
4782 break;
4783 case 0x012: /* VIS I array16 */
4784 CHECK_FPU_FEATURE(dc, VIS1);
4785 cpu_src1 = gen_load_gpr(dc, rs1);
4786 cpu_src2 = gen_load_gpr(dc, rs2);
4787 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4788 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4789 gen_store_gpr(dc, rd, cpu_dst);
4790 break;
4791 case 0x014: /* VIS I array32 */
4792 CHECK_FPU_FEATURE(dc, VIS1);
4793 cpu_src1 = gen_load_gpr(dc, rs1);
4794 cpu_src2 = gen_load_gpr(dc, rs2);
4795 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4796 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4797 gen_store_gpr(dc, rd, cpu_dst);
4798 break;
4799 case 0x018: /* VIS I alignaddr */
4800 CHECK_FPU_FEATURE(dc, VIS1);
4801 cpu_src1 = gen_load_gpr(dc, rs1);
4802 cpu_src2 = gen_load_gpr(dc, rs2);
4803 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4804 gen_store_gpr(dc, rd, cpu_dst);
4805 break;
4806 case 0x01a: /* VIS I alignaddrl */
4807 CHECK_FPU_FEATURE(dc, VIS1);
4808 cpu_src1 = gen_load_gpr(dc, rs1);
4809 cpu_src2 = gen_load_gpr(dc, rs2);
4810 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4811 gen_store_gpr(dc, rd, cpu_dst);
4812 break;
4813 case 0x019: /* VIS II bmask */
4814 CHECK_FPU_FEATURE(dc, VIS2);
4815 cpu_src1 = gen_load_gpr(dc, rs1);
4816 cpu_src2 = gen_load_gpr(dc, rs2);
4817 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
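/* bmask: the low 32 bits of the sum double as the byte-shuffle mask;
   park them in GSR bits 63:32 where bshuffle will look for them. */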
4818 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4819 gen_store_gpr(dc, rd, cpu_dst);
4820 break;
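/* The VIS fcmp* ops below compare packed 16- or 32-bit lanes and
   return a per-lane result bitmask in an integer register, hence
   gen_store_gpr rather than an FP-register store. */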
4821 case 0x020: /* VIS I fcmple16 */
4822 CHECK_FPU_FEATURE(dc, VIS1);
4823 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4824 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4825 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4826 gen_store_gpr(dc, rd, cpu_dst);
4827 break;
4828 case 0x022: /* VIS I fcmpne16 */
4829 CHECK_FPU_FEATURE(dc, VIS1);
4830 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4831 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4832 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4833 gen_store_gpr(dc, rd, cpu_dst);
4834 break;
4835 case 0x024: /* VIS I fcmple32 */
4836 CHECK_FPU_FEATURE(dc, VIS1);
4837 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4838 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4839 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4840 gen_store_gpr(dc, rd, cpu_dst);
4841 break;
4842 case 0x026: /* VIS I fcmpne32 */
4843 CHECK_FPU_FEATURE(dc, VIS1);
4844 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4845 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4846 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4847 gen_store_gpr(dc, rd, cpu_dst);
4848 break;
4849 case 0x028: /* VIS I fcmpgt16 */
4850 CHECK_FPU_FEATURE(dc, VIS1);
4851 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4852 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4853 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4854 gen_store_gpr(dc, rd, cpu_dst);
4855 break;
4856 case 0x02a: /* VIS I fcmpeq16 */
4857 CHECK_FPU_FEATURE(dc, VIS1);
4858 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4859 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4860 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4861 gen_store_gpr(dc, rd, cpu_dst);
4862 break;
4863 case 0x02c: /* VIS I fcmpgt32 */
4864 CHECK_FPU_FEATURE(dc, VIS1);
4865 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4866 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4867 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4868 gen_store_gpr(dc, rd, cpu_dst);
4869 break;
4870 case 0x02e: /* VIS I fcmpeq32 */
4871 CHECK_FPU_FEATURE(dc, VIS1);
4872 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4873 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4874 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4875 gen_store_gpr(dc, rd, cpu_dst);
4876 break;
4877 case 0x031: /* VIS I fmul8x16 */
4878 CHECK_FPU_FEATURE(dc, VIS1);
4879 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4880 break;
4881 case 0x033: /* VIS I fmul8x16au */
4882 CHECK_FPU_FEATURE(dc, VIS1);
4883 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4884 break;
4885 case 0x035: /* VIS I fmul8x16al */
4886 CHECK_FPU_FEATURE(dc, VIS1);
4887 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4888 break;
4889 case 0x036: /* VIS I fmul8sux16 */
4890 CHECK_FPU_FEATURE(dc, VIS1);
4891 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4892 break;
4893 case 0x037: /* VIS I fmul8ulx16 */
4894 CHECK_FPU_FEATURE(dc, VIS1);
4895 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4896 break;
4897 case 0x038: /* VIS I fmuld8sux16 */
4898 CHECK_FPU_FEATURE(dc, VIS1);
4899 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4900 break;
4901 case 0x039: /* VIS I fmuld8ulx16 */
4902 CHECK_FPU_FEATURE(dc, VIS1);
4903 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4904 break;
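/* The fpack* ops scale and clip according to GSR.scale_factor, so
   cpu_gsr is passed to (or threaded through) the helpers. */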
4905 case 0x03a: /* VIS I fpack32 */
4906 CHECK_FPU_FEATURE(dc, VIS1);
4907 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4908 break;
4909 case 0x03b: /* VIS I fpack16 */
4910 CHECK_FPU_FEATURE(dc, VIS1);
4911 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4912 cpu_dst_32 = gen_dest_fpr_F(dc);
4913 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4914 gen_store_fpr_F(dc, rd, cpu_dst_32);
4915 break;
4916 case 0x03d: /* VIS I fpackfix */
4917 CHECK_FPU_FEATURE(dc, VIS1);
4918 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4919 cpu_dst_32 = gen_dest_fpr_F(dc);
4920 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4921 gen_store_fpr_F(dc, rd, cpu_dst_32);
4922 break;
4923 case 0x03e: /* VIS I pdist */
4924 CHECK_FPU_FEATURE(dc, VIS1);
4925 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4926 break;
4927 case 0x048: /* VIS I faligndata */
4928 CHECK_FPU_FEATURE(dc, VIS1);
4929 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4930 break;
4931 case 0x04b: /* VIS I fpmerge */
4932 CHECK_FPU_FEATURE(dc, VIS1);
4933 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4934 break;
4935 case 0x04c: /* VIS II bshuffle */
4936 CHECK_FPU_FEATURE(dc, VIS2);
4937 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4938 break;
4939 case 0x04d: /* VIS I fexpand */
4940 CHECK_FPU_FEATURE(dc, VIS1);
4941 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4942 break;
4943 case 0x050: /* VIS I fpadd16 */
4944 CHECK_FPU_FEATURE(dc, VIS1);
4945 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4946 break;
4947 case 0x051: /* VIS I fpadd16s */
4948 CHECK_FPU_FEATURE(dc, VIS1);
4949 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4950 break;
4951 case 0x052: /* VIS I fpadd32 */
4952 CHECK_FPU_FEATURE(dc, VIS1);
4953 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4954 break;
4955 case 0x053: /* VIS I fpadd32s */
4956 CHECK_FPU_FEATURE(dc, VIS1);
4957 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4958 break;
4959 case 0x054: /* VIS I fpsub16 */
4960 CHECK_FPU_FEATURE(dc, VIS1);
4961 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4962 break;
4963 case 0x055: /* VIS I fpsub16s */
4964 CHECK_FPU_FEATURE(dc, VIS1);
4965 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4966 break;
4967 case 0x056: /* VIS I fpsub32 */
4968 CHECK_FPU_FEATURE(dc, VIS1);
4969 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4970 break;
4971 case 0x057: /* VIS I fpsub32s */
4972 CHECK_FPU_FEATURE(dc, VIS1);
4973 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4974 break;
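/* Opcodes 0x060-0x07f enumerate the 16 boolean functions of two
   operands, mapped directly onto TCG ops; the 's' forms are the
   32-bit single-register versions. */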
4975 case 0x060: /* VIS I fzero */
4976 CHECK_FPU_FEATURE(dc, VIS1);
4977 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4978 tcg_gen_movi_i64(cpu_dst_64, 0);
4979 gen_store_fpr_D(dc, rd, cpu_dst_64);
4980 break;
4981 case 0x061: /* VIS I fzeros */
4982 CHECK_FPU_FEATURE(dc, VIS1);
4983 cpu_dst_32 = gen_dest_fpr_F(dc);
4984 tcg_gen_movi_i32(cpu_dst_32, 0);
4985 gen_store_fpr_F(dc, rd, cpu_dst_32);
4986 break;
4987 case 0x062: /* VIS I fnor */
4988 CHECK_FPU_FEATURE(dc, VIS1);
4989 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4990 break;
4991 case 0x063: /* VIS I fnors */
4992 CHECK_FPU_FEATURE(dc, VIS1);
4993 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4994 break;
4995 case 0x064: /* VIS I fandnot2 */
4996 CHECK_FPU_FEATURE(dc, VIS1);
4997 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4998 break;
4999 case 0x065: /* VIS I fandnot2s */
5000 CHECK_FPU_FEATURE(dc, VIS1);
5001 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5002 break;
5003 case 0x066: /* VIS I fnot2 */
5004 CHECK_FPU_FEATURE(dc, VIS1);
5005 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5006 break;
5007 case 0x067: /* VIS I fnot2s */
5008 CHECK_FPU_FEATURE(dc, VIS1);
5009 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5010 break;
5011 case 0x068: /* VIS I fandnot1 */
5012 CHECK_FPU_FEATURE(dc, VIS1);
5013 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5014 break;
5015 case 0x069: /* VIS I fandnot1s */
5016 CHECK_FPU_FEATURE(dc, VIS1);
5017 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5018 break;
5019 case 0x06a: /* VIS I fnot1 */
5020 CHECK_FPU_FEATURE(dc, VIS1);
5021 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5022 break;
5023 case 0x06b: /* VIS I fnot1s */
5024 CHECK_FPU_FEATURE(dc, VIS1);
5025 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5026 break;
5027 case 0x06c: /* VIS I fxor */
5028 CHECK_FPU_FEATURE(dc, VIS1);
5029 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5030 break;
5031 case 0x06d: /* VIS I fxors */
5032 CHECK_FPU_FEATURE(dc, VIS1);
5033 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5034 break;
5035 case 0x06e: /* VIS I fnand */
5036 CHECK_FPU_FEATURE(dc, VIS1);
5037 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5038 break;
5039 case 0x06f: /* VIS I fnands */
5040 CHECK_FPU_FEATURE(dc, VIS1);
5041 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5042 break;
5043 case 0x070: /* VIS I fand */
5044 CHECK_FPU_FEATURE(dc, VIS1);
5045 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5046 break;
5047 case 0x071: /* VIS I fands */
5048 CHECK_FPU_FEATURE(dc, VIS1);
5049 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5050 break;
5051 case 0x072: /* VIS I fxnor */
5052 CHECK_FPU_FEATURE(dc, VIS1);
5053 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5054 break;
5055 case 0x073: /* VIS I fxnors */
5056 CHECK_FPU_FEATURE(dc, VIS1);
5057 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5058 break;
5059 case 0x074: /* VIS I fsrc1 */
5060 CHECK_FPU_FEATURE(dc, VIS1);
5061 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5062 gen_store_fpr_D(dc, rd, cpu_src1_64);
5063 break;
5064 case 0x075: /* VIS I fsrc1s */
5065 CHECK_FPU_FEATURE(dc, VIS1);
5066 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5067 gen_store_fpr_F(dc, rd, cpu_src1_32);
5068 break;
5069 case 0x076: /* VIS I fornot2 */
5070 CHECK_FPU_FEATURE(dc, VIS1);
5071 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5072 break;
5073 case 0x077: /* VIS I fornot2s */
5074 CHECK_FPU_FEATURE(dc, VIS1);
5075 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5076 break;
5077 case 0x078: /* VIS I fsrc2 */
5078 CHECK_FPU_FEATURE(dc, VIS1);
5079 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5080 gen_store_fpr_D(dc, rd, cpu_src1_64);
5081 break;
5082 case 0x079: /* VIS I fsrc2s */
5083 CHECK_FPU_FEATURE(dc, VIS1);
5084 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5085 gen_store_fpr_F(dc, rd, cpu_src1_32);
5086 break;
5087 case 0x07a: /* VIS I fornot1 */
5088 CHECK_FPU_FEATURE(dc, VIS1);
5089 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5090 break;
5091 case 0x07b: /* VIS I fornot1s */
5092 CHECK_FPU_FEATURE(dc, VIS1);
5093 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5094 break;
5095 case 0x07c: /* VIS I for */
5096 CHECK_FPU_FEATURE(dc, VIS1);
5097 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5098 break;
5099 case 0x07d: /* VIS I fors */
5100 CHECK_FPU_FEATURE(dc, VIS1);
5101 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5102 break;
5103 case 0x07e: /* VIS I fone */
5104 CHECK_FPU_FEATURE(dc, VIS1);
5105 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5106 tcg_gen_movi_i64(cpu_dst_64, -1);
5107 gen_store_fpr_D(dc, rd, cpu_dst_64);
5108 break;
5109 case 0x07f: /* VIS I fones */
5110 CHECK_FPU_FEATURE(dc, VIS1);
5111 cpu_dst_32 = gen_dest_fpr_F(dc);
5112 tcg_gen_movi_i32(cpu_dst_32, -1);
5113 gen_store_fpr_F(dc, rd, cpu_dst_32);
5114 break;
5115 case 0x080: /* VIS I shutdown */
5116 case 0x081: /* VIS II siam */
5117 // XXX
5118 goto illegal_insn;
5119 default:
5120 goto illegal_insn;
5121 }
5122 #else
5123 goto ncp_insn;
5124 #endif
5125 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5126 #ifdef TARGET_SPARC64
5127 goto illegal_insn;
5128 #else
5129 goto ncp_insn;
5130 #endif
5131 #ifdef TARGET_SPARC64
5132 } else if (xop == 0x39) { /* V9 return */
5133 save_state(dc);
5134 cpu_src1 = get_src1(dc, insn);
5135 cpu_tmp0 = get_temp_tl(dc);
5136 if (IS_IMM) { /* immediate */
5137 simm = GET_FIELDs(insn, 19, 31);
5138 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5139 } else { /* register */
5140 rs2 = GET_FIELD(insn, 27, 31);
5141 if (rs2) {
5142 cpu_src2 = gen_load_gpr(dc, rs2);
5143 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5144 } else {
5145 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5146 }
5148 gen_helper_restore(cpu_env);
5149 gen_mov_pc_npc(dc);
5150 gen_check_align(cpu_tmp0, 3);
5151 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5152 dc->npc = DYNAMIC_PC;
5153 goto jmp_insn;
5154 #endif
5155 } else {
5156 cpu_src1 = get_src1(dc, insn);
5157 cpu_tmp0 = get_temp_tl(dc);
5158 if (IS_IMM) { /* immediate */
5159 simm = GET_FIELDs(insn, 19, 31);
5160 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5161 } else { /* register */
5162 rs2 = GET_FIELD(insn, 27, 31);
5163 if (rs2) {
5164 cpu_src2 = gen_load_gpr(dc, rs2);
5165 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5166 } else {
5167 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5168 }
5170 switch (xop) {
5171 case 0x38: /* jmpl */
5172 {
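/* jmpl stores its own address (dc->pc) into rd -- the link value --
   before control transfers to the computed target. */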
5173 TCGv t = gen_dest_gpr(dc, rd);
5174 tcg_gen_movi_tl(t, dc->pc);
5175 gen_store_gpr(dc, rd, t);
5177 gen_mov_pc_npc(dc);
5178 gen_check_align(cpu_tmp0, 3);
5179 gen_address_mask(dc, cpu_tmp0);
5180 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5181 dc->npc = DYNAMIC_PC;
5182 }
5183 goto jmp_insn;
5184 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5185 case 0x39: /* rett, V9 return */
5186 {
5187 if (!supervisor(dc))
5188 goto priv_insn;
5189 gen_mov_pc_npc(dc);
5190 gen_check_align(cpu_tmp0, 3);
5191 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5192 dc->npc = DYNAMIC_PC;
5193 gen_helper_rett(cpu_env);
5194 }
5195 goto jmp_insn;
5196 #endif
5197 case 0x3b: /* flush */
5198 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5199 goto unimp_flush;
5200 /* nop */
5201 break;
5202 case 0x3c: /* save */
5203 gen_helper_save(cpu_env);
5204 gen_store_gpr(dc, rd, cpu_tmp0);
5205 break;
5206 case 0x3d: /* restore */
5207 gen_helper_restore(cpu_env);
5208 gen_store_gpr(dc, rd, cpu_tmp0);
5209 break;
5210 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5211 case 0x3e: /* V9 done/retry */
5212 {
5213 switch (rd) {
5214 case 0:
5215 if (!supervisor(dc))
5216 goto priv_insn;
5217 dc->npc = DYNAMIC_PC;
5218 dc->pc = DYNAMIC_PC;
5219 gen_helper_done(cpu_env);
5220 goto jmp_insn;
5221 case 1:
5222 if (!supervisor(dc))
5223 goto priv_insn;
5224 dc->npc = DYNAMIC_PC;
5225 dc->pc = DYNAMIC_PC;
5226 gen_helper_retry(cpu_env);
5227 goto jmp_insn;
5228 default:
5229 goto illegal_insn;
5230 }
5231 }
5232 break;
5233 #endif
5234 default:
5235 goto illegal_insn;
5236 }
5238 break;
5239 }
5240 break;
5241 case 3: /* load/store instructions */
5242 {
5243 unsigned int xop = GET_FIELD(insn, 7, 12);
5244 /* ??? gen_address_mask prevents us from using a source
5245 register directly. Always generate a temporary. */
5246 TCGv cpu_addr = get_temp_tl(dc);
5248 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5249 if (xop == 0x3c || xop == 0x3e) {
5250 /* V9 casa/casxa : no offset */
5251 } else if (IS_IMM) { /* immediate */
5252 simm = GET_FIELDs(insn, 19, 31);
5253 if (simm != 0) {
5254 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5255 }
5256 } else { /* register */
5257 rs2 = GET_FIELD(insn, 27, 31);
5258 if (rs2 != 0) {
5259 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5260 }
5262 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5263 (xop > 0x17 && xop <= 0x1d ) ||
5264 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5265 TCGv cpu_val = gen_dest_gpr(dc, rd);
5267 switch (xop) {
5268 case 0x0: /* ld, V9 lduw, load unsigned word */
5269 gen_address_mask(dc, cpu_addr);
5270 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
5271 break;
5272 case 0x1: /* ldub, load unsigned byte */
5273 gen_address_mask(dc, cpu_addr);
5274 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
5275 break;
5276 case 0x2: /* lduh, load unsigned halfword */
5277 gen_address_mask(dc, cpu_addr);
5278 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
5279 break;
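/* ldd fills an even/odd register pair, so an odd rd is illegal: the
   low word of the 64-bit value goes to rd + 1, the high word to rd. */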
5280 case 0x3: /* ldd, load double word */
5281 if (rd & 1)
5282 goto illegal_insn;
5283 else {
5284 TCGv_i64 t64;
5286 gen_address_mask(dc, cpu_addr);
5287 t64 = tcg_temp_new_i64();
5288 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
5289 tcg_gen_trunc_i64_tl(cpu_val, t64);
5290 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5291 gen_store_gpr(dc, rd + 1, cpu_val);
5292 tcg_gen_shri_i64(t64, t64, 32);
5293 tcg_gen_trunc_i64_tl(cpu_val, t64);
5294 tcg_temp_free_i64(t64);
5295 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5296 }
5297 break;
5298 case 0x9: /* ldsb, load signed byte */
5299 gen_address_mask(dc, cpu_addr);
5300 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
5301 break;
5302 case 0xa: /* ldsh, load signed halfword */
5303 gen_address_mask(dc, cpu_addr);
5304 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
5305 break;
5306 case 0xd: /* ldstub */
5307 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5308 break;
5309 case 0x0f:
5310 /* swap: atomically swap a register with memory */
5311 CHECK_IU_FEATURE(dc, SWAP);
5312 cpu_src1 = gen_load_gpr(dc, rd);
5313 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5314 dc->mem_idx, MO_TEUL);
5315 break;
5316 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5317 case 0x10: /* lda, V9 lduwa, load word alternate */
5318 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5319 break;
5320 case 0x11: /* lduba, load unsigned byte alternate */
5321 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5322 break;
5323 case 0x12: /* lduha, load unsigned halfword alternate */
5324 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5325 break;
5326 case 0x13: /* ldda, load double word alternate */
5327 if (rd & 1) {
5328 goto illegal_insn;
5329 }
5330 gen_ldda_asi(dc, cpu_addr, insn, rd);
5331 goto skip_move;
5332 case 0x19: /* ldsba, load signed byte alternate */
5333 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5334 break;
5335 case 0x1a: /* ldsha, load signed halfword alternate */
5336 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5337 break;
5338 case 0x1d: /* ldstuba -- XXX: should be atomic */
5339 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5340 break;
5341 case 0x1f: /* swapa: atomically swap a register with
5342 alternate-space memory */
5343 CHECK_IU_FEATURE(dc, SWAP);
5344 cpu_src1 = gen_load_gpr(dc, rd);
5345 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5346 break;
5348 #ifndef TARGET_SPARC64
5349 case 0x30: /* ldc */
5350 case 0x31: /* ldcsr */
5351 case 0x33: /* lddc */
5352 goto ncp_insn;
5353 #endif
5354 #endif
5355 #ifdef TARGET_SPARC64
5356 case 0x08: /* V9 ldsw */
5357 gen_address_mask(dc, cpu_addr);
5358 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
5359 break;
5360 case 0x0b: /* V9 ldx */
5361 gen_address_mask(dc, cpu_addr);
5362 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
5363 break;
5364 case 0x18: /* V9 ldswa */
5365 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5366 break;
5367 case 0x1b: /* V9 ldxa */
5368 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5369 break;
5370 case 0x2d: /* V9 prefetch, no effect */
5371 goto skip_move;
5372 case 0x30: /* V9 ldfa */
5373 if (gen_trap_ifnofpu(dc)) {
5374 goto jmp_insn;
5375 }
5376 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5377 gen_update_fprs_dirty(dc, rd);
5378 goto skip_move;
5379 case 0x33: /* V9 lddfa */
5380 if (gen_trap_ifnofpu(dc)) {
5381 goto jmp_insn;
5382 }
5383 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5384 gen_update_fprs_dirty(dc, DFPREG(rd));
5385 goto skip_move;
5386 case 0x3d: /* V9 prefetcha, no effect */
5387 goto skip_move;
5388 case 0x32: /* V9 ldqfa */
5389 CHECK_FPU_FEATURE(dc, FLOAT128);
5390 if (gen_trap_ifnofpu(dc)) {
5391 goto jmp_insn;
5392 }
5393 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5394 gen_update_fprs_dirty(dc, QFPREG(rd));
5395 goto skip_move;
5396 #endif
5397 default:
5398 goto illegal_insn;
5399 }
5400 gen_store_gpr(dc, rd, cpu_val);
5401 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5402 skip_move: ;
5403 #endif
5404 } else if (xop >= 0x20 && xop < 0x24) {
5405 if (gen_trap_ifnofpu(dc)) {
5406 goto jmp_insn;
5407 }
5408 switch (xop) {
5409 case 0x20: /* ldf, load fpreg */
5410 gen_address_mask(dc, cpu_addr);
5411 cpu_dst_32 = gen_dest_fpr_F(dc);
5412 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5413 dc->mem_idx, MO_TEUL);
5414 gen_store_fpr_F(dc, rd, cpu_dst_32);
5415 break;
5416 case 0x21: /* ldfsr, V9 ldxfsr */
5417 #ifdef TARGET_SPARC64
5418 gen_address_mask(dc, cpu_addr);
5419 if (rd == 1) {
5420 TCGv_i64 t64 = tcg_temp_new_i64();
5421 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5422 dc->mem_idx, MO_TEQ);
5423 gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5424 tcg_temp_free_i64(t64);
5425 break;
5426 }
5427 #endif
5428 cpu_dst_32 = get_temp_i32(dc);
5429 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5430 dc->mem_idx, MO_TEUL);
5431 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5432 break;
5433 case 0x22: /* ldqf, load quad fpreg */
5434 CHECK_FPU_FEATURE(dc, FLOAT128);
5435 gen_address_mask(dc, cpu_addr);
5436 cpu_src1_64 = tcg_temp_new_i64();
5437 tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5438 MO_TEQ | MO_ALIGN_4);
5439 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5440 cpu_src2_64 = tcg_temp_new_i64();
5441 tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5442 MO_TEQ | MO_ALIGN_4);
5443 gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5444 tcg_temp_free_i64(cpu_src1_64);
5445 tcg_temp_free_i64(cpu_src2_64);
5446 break;
5447 case 0x23: /* lddf, load double fpreg */
5448 gen_address_mask(dc, cpu_addr);
5449 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5450 tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5451 MO_TEQ | MO_ALIGN_4);
5452 gen_store_fpr_D(dc, rd, cpu_dst_64);
5453 break;
5454 default:
5455 goto illegal_insn;
5456 }
5457 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5458 xop == 0xe || xop == 0x1e) {
5459 TCGv cpu_val = gen_load_gpr(dc, rd);
5461 switch (xop) {
5462 case 0x4: /* st, store word */
5463 gen_address_mask(dc, cpu_addr);
5464 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
5465 break;
5466 case 0x5: /* stb, store byte */
5467 gen_address_mask(dc, cpu_addr);
5468 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
5469 break;
5470 case 0x6: /* sth, store halfword */
5471 gen_address_mask(dc, cpu_addr);
5472 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
5473 break;
5474 case 0x7: /* std, store double word */
5475 if (rd & 1)
5476 goto illegal_insn;
5477 else {
5478 TCGv_i64 t64;
5479 TCGv lo;
5481 gen_address_mask(dc, cpu_addr);
5482 lo = gen_load_gpr(dc, rd + 1);
5483 t64 = tcg_temp_new_i64();
5484 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5485 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
5486 tcg_temp_free_i64(t64);
5487 }
5488 break;
5489 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5490 case 0x14: /* sta, V9 stwa, store word alternate */
5491 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5492 break;
5493 case 0x15: /* stba, store byte alternate */
5494 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5495 break;
5496 case 0x16: /* stha, store halfword alternate */
5497 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5498 break;
5499 case 0x17: /* stda, store double word alternate */
5500 if (rd & 1) {
5501 goto illegal_insn;
5502 }
5503 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5504 break;
5505 #endif
5506 #ifdef TARGET_SPARC64
5507 case 0x0e: /* V9 stx */
5508 gen_address_mask(dc, cpu_addr);
5509 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5510 break;
5511 case 0x1e: /* V9 stxa */
5512 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5513 break;
5514 #endif
5515 default:
5516 goto illegal_insn;
5517 }
5518 } else if (xop > 0x23 && xop < 0x28) {
5519 if (gen_trap_ifnofpu(dc)) {
5520 goto jmp_insn;
5521 }
5522 switch (xop) {
5523 case 0x24: /* stf, store fpreg */
5524 gen_address_mask(dc, cpu_addr);
5525 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5526 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5527 dc->mem_idx, MO_TEUL);
5528 break;
5529 case 0x25: /* stfsr, V9 stxfsr */
5530 {
5531 #ifdef TARGET_SPARC64
5532 gen_address_mask(dc, cpu_addr);
5533 if (rd == 1) {
5534 tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
5535 break;
5536 }
5537 #endif
5538 tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
5539 }
5540 break;
5541 case 0x26:
5542 #ifdef TARGET_SPARC64
5543 /* V9 stqf, store quad fpreg */
5544 CHECK_FPU_FEATURE(dc, FLOAT128);
5545 gen_address_mask(dc, cpu_addr);
5546 /* ??? While stqf only requires 4-byte alignment, it is
5547 legal for the cpu to signal the unaligned exception.
5548 The OS trap handler is then required to fix it up.
5549 For qemu, this avoids having to probe the second page
5550 before performing the first write. */
5551 cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5552 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5553 dc->mem_idx, MO_TEQ | MO_ALIGN_16);
5554 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5555 cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5556 tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
5557 dc->mem_idx, MO_TEQ);
5558 break;
5559 #else /* !TARGET_SPARC64 */
5560 /* stdfq, store floating point queue */
5561 #if defined(CONFIG_USER_ONLY)
5562 goto illegal_insn;
5563 #else
5564 if (!supervisor(dc))
5565 goto priv_insn;
5566 if (gen_trap_ifnofpu(dc)) {
5567 goto jmp_insn;
5568 }
5569 goto nfq_insn;
5570 #endif
5571 #endif
5572 case 0x27: /* stdf, store double fpreg */
5573 gen_address_mask(dc, cpu_addr);
5574 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5575 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5576 MO_TEQ | MO_ALIGN_4);
5577 break;
5578 default:
5579 goto illegal_insn;
5580 }
5581 } else if (xop > 0x33 && xop < 0x3f) {
5582 switch (xop) {
5583 #ifdef TARGET_SPARC64
5584 case 0x34: /* V9 stfa */
5585 if (gen_trap_ifnofpu(dc)) {
5586 goto jmp_insn;
5587 }
5588 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5589 break;
5590 case 0x36: /* V9 stqfa */
5591 {
5592 CHECK_FPU_FEATURE(dc, FLOAT128);
5593 if (gen_trap_ifnofpu(dc)) {
5594 goto jmp_insn;
5595 }
5596 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5597 }
5598 break;
5599 case 0x37: /* V9 stdfa */
5600 if (gen_trap_ifnofpu(dc)) {
5601 goto jmp_insn;
5602 }
5603 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5604 break;
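/* Compare-and-swap: rs2 supplies the comparison value, rd supplies
   the store value and receives the old memory contents. */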
5605 case 0x3e: /* V9 casxa */
5606 rs2 = GET_FIELD(insn, 27, 31);
5607 cpu_src2 = gen_load_gpr(dc, rs2);
5608 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5609 break;
5610 #else
5611 case 0x34: /* stc */
5612 case 0x35: /* stcsr */
5613 case 0x36: /* stdcq */
5614 case 0x37: /* stdc */
5615 goto ncp_insn;
5616 #endif
5617 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5618 case 0x3c: /* V9 or LEON3 casa */
5619 #ifndef TARGET_SPARC64
5620 CHECK_IU_FEATURE(dc, CASA);
5621 #endif
5622 rs2 = GET_FIELD(insn, 27, 31);
5623 cpu_src2 = gen_load_gpr(dc, rs2);
5624 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5625 break;
5626 #endif
5627 default:
5628 goto illegal_insn;
5629 }
5630 } else {
5631 goto illegal_insn;
5632 }
5633 }
5634 break;
5635 }
5636 /* default case for non-jump instructions */
5637 if (dc->npc == DYNAMIC_PC) {
5638 dc->pc = DYNAMIC_PC;
5639 gen_op_next_insn();
5640 } else if (dc->npc == JUMP_PC) {
5641 /* we can do a static jump */
5642 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5643 dc->is_br = 1;
5644 } else {
5645 dc->pc = dc->npc;
5646 dc->npc = dc->npc + 4;
5647 }
5648 jmp_insn:
5649 goto egress;
5650 illegal_insn:
5651 gen_exception(dc, TT_ILL_INSN);
5652 goto egress;
5653 unimp_flush:
5654 gen_exception(dc, TT_UNIMP_FLUSH);
5655 goto egress;
5656 #if !defined(CONFIG_USER_ONLY)
5657 priv_insn:
5658 gen_exception(dc, TT_PRIV_INSN);
5659 goto egress;
5660 #endif
5661 nfpu_insn:
5662 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5663 goto egress;
5664 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5665 nfq_insn:
5666 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5667 goto egress;
5668 #endif
5669 #ifndef TARGET_SPARC64
5670 ncp_insn:
5671 gen_exception(dc, TT_NCP_INSN);
5672 goto egress;
5673 #endif
5674 egress:
5675 if (dc->n_t32 != 0) {
5676 int i;
5677 for (i = dc->n_t32 - 1; i >= 0; --i) {
5678 tcg_temp_free_i32(dc->t32[i]);
5679 }
5680 dc->n_t32 = 0;
5681 }
5682 if (dc->n_ttl != 0) {
5683 int i;
5684 for (i = dc->n_ttl - 1; i >= 0; --i) {
5685 tcg_temp_free(dc->ttl[i]);
5686 }
5687 dc->n_ttl = 0;
5688 }
5689 }
5691 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5692 {
5693 SPARCCPU *cpu = sparc_env_get_cpu(env);
5694 CPUState *cs = CPU(cpu);
5695 target_ulong pc_start, last_pc;
5696 DisasContext dc1, *dc = &dc1;
5697 int num_insns;
5698 int max_insns;
5699 unsigned int insn;
5701 memset(dc, 0, sizeof(DisasContext));
5702 dc->tb = tb;
5703 pc_start = tb->pc;
5704 dc->pc = pc_start;
5705 last_pc = dc->pc;
5706 dc->npc = (target_ulong) tb->cs_base;
5707 dc->cc_op = CC_OP_DYNAMIC;
5708 dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
5709 dc->def = env->def;
5710 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5711 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5712 dc->singlestep = (cs->singlestep_enabled || singlestep);
5713 #ifdef TARGET_SPARC64
5714 dc->fprs_dirty = 0;
5715 dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5716 #endif
5718 num_insns = 0;
5719 max_insns = tb->cflags & CF_COUNT_MASK;
5720 if (max_insns == 0) {
5721 max_insns = CF_COUNT_MASK;
5722 }
5723 if (max_insns > TCG_MAX_INSNS) {
5724 max_insns = TCG_MAX_INSNS;
5725 }
5727 gen_tb_start(tb);
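/* Each insn records (pc, npc) via tcg_gen_insn_start so that
   restore_state_to_opc can rebuild both counters; when npc is the
   two-valued JUMP_PC, the taken target is recorded with a flag bit. */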
5728 do {
5729 if (dc->npc & JUMP_PC) {
5730 assert(dc->jump_pc[1] == dc->pc + 4);
5731 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5732 } else {
5733 tcg_gen_insn_start(dc->pc, dc->npc);
5734 }
5735 num_insns++;
5736 last_pc = dc->pc;
5738 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5739 if (dc->pc != pc_start) {
5740 save_state(dc);
5741 }
5742 gen_helper_debug(cpu_env);
5743 tcg_gen_exit_tb(0);
5744 dc->is_br = 1;
5745 goto exit_gen_loop;
5746 }
5748 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5749 gen_io_start();
5750 }
5752 insn = cpu_ldl_code(env, dc->pc);
5754 disas_sparc_insn(dc, insn);
5756 if (dc->is_br)
5757 break;
5758 /* if the next PC is different, we abort now */
5759 if (dc->pc != (last_pc + 4))
5760 break;
5761 /* if we reach a page boundary, we stop generation so that the
5762 PC of a TT_TFAULT exception is always in the right page */
5763 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5764 break;
5765 /* in single-step mode, we generate only one instruction and
5766 then generate an exception */
5767 if (dc->singlestep) {
5768 break;
5769 }
5770 } while (!tcg_op_buf_full() &&
5771 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5772 num_insns < max_insns);
5774 exit_gen_loop:
5775 if (tb->cflags & CF_LAST_IO) {
5776 gen_io_end();
5777 }
5778 if (!dc->is_br) {
5779 if (dc->pc != DYNAMIC_PC &&
5780 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5781 /* static PC and NPC: we can use direct chaining */
5782 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5783 } else {
5784 if (dc->pc != DYNAMIC_PC) {
5785 tcg_gen_movi_tl(cpu_pc, dc->pc);
5786 }
5787 save_npc(dc);
5788 tcg_gen_exit_tb(0);
5789 }
5790 }
5791 gen_tb_end(tb, num_insns);
5793 tb->size = last_pc + 4 - pc_start;
5794 tb->icount = num_insns;
5796 #ifdef DEBUG_DISAS
5797 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5798 && qemu_log_in_addr_range(pc_start)) {
5799 qemu_log_lock();
5800 qemu_log("--------------\n");
5801 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5802 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5803 qemu_log("\n");
5804 qemu_log_unlock();
5805 }
5806 #endif
5807 }
5809 void gen_intermediate_code_init(CPUSPARCState *env)
5810 {
5811 static int inited;
5812 static const char gregnames[32][4] = {
5813 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5814 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5815 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5816 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5818 static const char fregnames[32][4] = {
5819 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5820 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5821 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5822 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5825 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5826 #ifdef TARGET_SPARC64
5827 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5828 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5829 #else
5830 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5831 #endif
5832 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5833 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5836 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5837 #ifdef TARGET_SPARC64
5838 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5839 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5840 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5841 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5842 "hstick_cmpr" },
5843 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5844 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5845 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5846 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5847 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5848 #endif
5849 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5850 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5851 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5852 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5853 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5854 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5855 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5856 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5857 #ifndef CONFIG_USER_ONLY
5858 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5859 #endif
5860 };
5862 unsigned int i;
5864 /* init various static tables */
5865 if (inited) {
5866 return;
5867 }
5868 inited = 1;
5870 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5871 tcg_ctx.tcg_env = cpu_env;
5873 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5874 offsetof(CPUSPARCState, regwptr),
5875 "regwptr");
5877 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5878 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5879 }
5881 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5882 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5883 }
5885 TCGV_UNUSED(cpu_regs[0]);
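/* %g0 reads as zero and ignores writes, so it gets no TCG global;
   g1-g7 live directly in env, while the windowed registers are
   reached indirectly through regwptr. */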
5886 for (i = 1; i < 8; ++i) {
5887 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5888 offsetof(CPUSPARCState, gregs[i]),
5889 gregnames[i]);
5890 }
5892 for (i = 8; i < 32; ++i) {
5893 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5894 (i - 8) * sizeof(target_ulong),
5895 gregnames[i]);
5896 }
5898 for (i = 0; i < TARGET_DPREGS; i++) {
5899 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5900 offsetof(CPUSPARCState, fpr[i]),
5901 fregnames[i]);
5902 }
5903 }
5905 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5906 target_ulong *data)
5907 {
5908 target_ulong pc = data[0];
5909 target_ulong npc = data[1];
5911 env->pc = pc;
5912 if (npc == DYNAMIC_PC) {
5913 /* dynamic NPC: already stored */
5914 } else if (npc & JUMP_PC) {
5915 /* jump PC: use 'cond' and the jump targets of the translation */
5916 if (env->cond) {
5917 env->npc = npc & ~3;
5918 } else {
5919 env->npc = pc + 4;
5920 }
5921 } else {
5922 env->npc = npc;
5923 }
5924 }