/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#include "asi.h"

#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */

/* global register indexes */
static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"

typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
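/* Worked example of the two bit orders: GET_FIELD numbers bits from the
   most significant end, so GET_FIELD(insn, 3, 6) shifts right by
   31 - 6 = 25 and masks 4 bits, i.e. it extracts bits 28..25 of the
   word (the Bicc condition field; for insn = 0x12800004, a bne, it
   yields 9).  GET_FIELD_SP uses the manuals' numbering instead, so
   GET_FIELD_SP(insn, 25, 28) names exactly the same four bits.  */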
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1<<13))

static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
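/* On SPARC V9 the FPRS register tracks which half of the FP register
   file has been modified: bit 0 (FPRS.DL) covers the lower half,
   %f0-%f31, and bit 1 (FPRS.DU) the upper half, which is why the bit
   chosen above depends on rd < 32.  */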
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

#ifdef TARGET_SPARC64
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}

static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}

static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
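/* Note on the two exits above: exiting with (uintptr_t)tb + tb_num lets
   the execution loop patch this block so it chains directly to its
   successor, which is only safe while both pc and npc stay on the same
   guest page as the current TB (see use_goto_tb); exiting with 0 always
   returns control to the main loop for a fresh lookup.  */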
// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
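/* Why (dst < src) recovers the carry: for a 32-bit unsigned add
   dst = src1 + src2, the sum wraps around exactly when a carry-out
   occurred, and a wrapped sum is strictly smaller than either operand;
   e.g. 0xffffffff + 2 = 0x00000001, and 1 < 2.  */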
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}

static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the sub that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}

static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
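/* MULScc performs one step of the V8 iterative multiply: the multiplier
   is consumed one bit at a time through Y (shifted right each step),
   the running partial product in rs1 is shifted right with N ^ V
   shifted in at the top, and rs2 is added in only when the Y bit just
   examined was set.  Executed 32 times (plus a final adjustment step),
   the sequence computes a full 32x32-bit multiply.  */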
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
   FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}

static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
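/* The JUMP_PC state encodes a conditional delayed branch whose outcome
   is still pending at translation time: jump_pc[0] holds the taken
   target, jump_pc[1] the fall-through address, and gen_generic_branch
   selects between them at run time from cpu_cond.  This lets the
   delay-slot instruction be translated before the branch direction has
   to be materialized into cpu_npc.  */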
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->is_br = 1;
}

static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);
    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
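/* Example of the fast paths above: after a SUBcc whose operands are
   still live in cc_src/cc_src2, condition "le" (cond = 2) becomes a
   single TCG_COND_LE comparison of those operands, and after a logical
   op (CC_OP_LOGIC) condition "ne" (cond = 9) becomes TCG_COND_NE
   against zero on cc_dst, so the full flag word never has to be
   materialized.  */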
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
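/* The offsets above locate the four V9 condition-code fields in the
   FSR: fcc0 sits at bit 10 (FSR_FCC0_SHIFT), while fcc1, fcc2 and fcc3
   occupy bits 33:32, 35:34 and 37:36, so each offset is expressed
   relative to the fcc0 position (e.g. 32 - 10 = 22 for fcc1).  */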
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,              /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,              /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif

static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}

static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, TCGMemOp memop)
{
    /* ??? Should be atomic.  */
    TCGv t0 = tcg_temp_new();
    gen_address_mask(dc, addr);
    tcg_gen_qemu_ld_tl(t0, addr, mmu_idx, memop);
    tcg_gen_qemu_st_tl(src, addr, mmu_idx, memop);
    tcg_gen_mov_tl(dst, t0);
    tcg_temp_free(t0);
}
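/* The load/store pair above is not atomic with respect to other vCPUs:
   another thread could write the location between the load and the
   store.  A sketch of an atomic variant, assuming a QEMU version that
   provides the tcg_gen_atomic_xchg_tl op introduced with the MTTCG
   work, would collapse the pair into a single exchange:

       tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
*/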
/* asi moves */
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    TCGMemOp memop;
} DisasASI;

static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        }
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }

    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:       /* Bypass */
        case ASI_REAL_IO:    /* Bypass, non-cacheable */
        case ASI_REAL_L:     /* Bypass LE */
        case ASI_REAL_IO_L:  /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            mem_idx = MMU_NUCLEUS_IDX;
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
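/* Decode example: a "lda [%o0] ASI_PL, %o1" selects the primary address
   space in little-endian byte order.  get_asi leaves mem_idx alone,
   classifies the access as GET_ASI_DIRECT, and flips MO_BSWAP in the
   memop because ASI_PL has bit 3 set, so the load is emitted as a
   single byte-swapped tcg_gen_qemu_ld_tl instead of a helper call.  */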
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }
}

static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for stda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);

            /* A write to a TLB register may alter page maps.  End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
                         TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
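/* For the directly-mapped ASIs, SWAPA is thus emitted inline as the
   same load/store pair as plain SWAP (gen_swap) rather than calling
   the generic ASI helpers; unsupported ASIs currently raise a plain
   TT_DATA_ACCESS instead of the precise DAE_invalid_asi code noted
   above.  */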
2327 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv val2,
2328 int insn, int rd)
2330 DisasASI da = get_asi(dc, insn, MO_TEUL);
2331 TCGv val1, dst;
2332 TCGv_i32 r_asi;
2334 if (da.type == GET_ASI_EXCP) {
2335 return;
2338 save_state(dc);
2339 val1 = gen_load_gpr(dc, rd);
2340 dst = gen_dest_gpr(dc, rd);
2341 r_asi = tcg_const_i32(da.asi);
2342 gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
2343 tcg_temp_free_i32(r_asi);
2344 gen_store_gpr(dc, rd, dst);
2347 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2349 DisasASI da = get_asi(dc, insn, MO_UB);
2351 switch (da.type) {
2352 case GET_ASI_EXCP:
2353 break;
2354 default:
2356 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2357 TCGv_i32 r_mop = tcg_const_i32(MO_UB);
2358 TCGv_i64 s64, t64;
2360 save_state(dc);
2361 t64 = tcg_temp_new_i64();
2362 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2364 s64 = tcg_const_i64(0xff);
2365 gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
2366 tcg_temp_free_i64(s64);
2367 tcg_temp_free_i32(r_mop);
2368 tcg_temp_free_i32(r_asi);
2370 tcg_gen_trunc_i64_tl(dst, t64);
2371 tcg_temp_free_i64(t64);
2373 break;
2376 #endif
2378 #ifdef TARGET_SPARC64
2379 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2380 int insn, int size, int rd)
2382 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2383 TCGv_i32 d32;
2385 switch (da.type) {
2386 case GET_ASI_EXCP:
2387 break;
2389 case GET_ASI_DIRECT:
2390 gen_address_mask(dc, addr);
2391 switch (size) {
2392 case 4:
2393 d32 = gen_dest_fpr_F(dc);
2394 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
2395 gen_store_fpr_F(dc, rd, d32);
2396 break;
2397 case 8:
2398 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2399 break;
2400 case 16:
2401 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2402 tcg_gen_addi_tl(addr, addr, 8);
2403 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2404 break;
2405 default:
2406 g_assert_not_reached();
2408 break;
2410 case GET_ASI_BLOCK:
2411 /* Valid for lddfa on aligned registers only. */
2412 if (size == 8 && (rd & 7) == 0) {
2413 TCGMemOp memop;
2414 TCGv eight;
2415 int i;
2417 gen_address_mask(dc, addr);
2419 /* The first operation checks required alignment. */
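/* A block access moves 64 bytes (eight double f-registers).  Only the
   first load carries MO_ALIGN_64: once the start address is 64-byte
   aligned, the remaining seven 8-byte accesses stay inside the same
   block and cannot fail the alignment check. */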
2420 memop = da.memop | MO_ALIGN_64;
2421 eight = tcg_const_tl(8);
2422 for (i = 0; ; ++i) {
2423 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2424 da.mem_idx, memop);
2425 if (i == 7) {
2426 break;
2428 tcg_gen_add_tl(addr, addr, eight);
2429 memop = da.memop;
2431 tcg_temp_free(eight);
2432 } else {
2433 gen_exception(dc, TT_ILL_INSN);
2435 break;
2437 case GET_ASI_SHORT:
2438 /* Valid for lddfa only. */
2439 if (size == 8) {
2440 gen_address_mask(dc, addr);
2441 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2442 } else {
2443 gen_exception(dc, TT_ILL_INSN);
2445 break;
2447 default:
2449 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2450 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2452 save_state(dc);
2453 /* According to the table in the UA2011 manual, the only
2454 other asis that are valid for ldfa/lddfa/ldqfa are
2455 the NO_FAULT asis. We still need a helper for these,
2456 but we can just use the integer asi helper for them. */
2457 switch (size) {
2458 case 4:
2460 TCGv_i64 d64 = tcg_temp_new_i64();
2461 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2462 d32 = gen_dest_fpr_F(dc);
2463 tcg_gen_extrl_i64_i32(d32, d64);
2464 tcg_temp_free_i64(d64);
2465 gen_store_fpr_F(dc, rd, d32);
2467 break;
2468 case 8:
2469 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2470 break;
2471 case 16:
2472 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2473 tcg_gen_addi_tl(addr, addr, 8);
2474 gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2475 break;
2476 default:
2477 g_assert_not_reached();
2479 tcg_temp_free_i32(r_mop);
2480 tcg_temp_free_i32(r_asi);
2482 break;
2486 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2487 int insn, int size, int rd)
2489 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2490 TCGv_i32 d32;
2492 switch (da.type) {
2493 case GET_ASI_EXCP:
2494 break;
2496 case GET_ASI_DIRECT:
2497 gen_address_mask(dc, addr);
2498 switch (size) {
2499 case 4:
2500 d32 = gen_load_fpr_F(dc, rd);
2501 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
2502 break;
2503 case 8:
2504 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2505 break;
2506 case 16:
2507 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2508 tcg_gen_addi_tl(addr, addr, 8);
2509 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2510 break;
2511 default:
2512 g_assert_not_reached();
2514 break;
2516 case GET_ASI_BLOCK:
2517 /* Valid for stdfa on aligned registers only. */
2518 if (size == 8 && (rd & 7) == 0) {
2519 TCGMemOp memop;
2520 TCGv eight;
2521 int i;
2523 gen_address_mask(dc, addr);
2525 /* The first operation checks required alignment. */
2526 memop = da.memop | MO_ALIGN_64;
2527 eight = tcg_const_tl(8);
2528 for (i = 0; ; ++i) {
2529 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2530 da.mem_idx, memop);
2531 if (i == 7) {
2532 break;
2534 tcg_gen_add_tl(addr, addr, eight);
2535 memop = da.memop;
2537 tcg_temp_free(eight);
2538 } else {
2539 gen_exception(dc, TT_ILL_INSN);
2541 break;
2543 case GET_ASI_SHORT:
2544 /* Valid for stdfa only. */
2545 if (size == 8) {
2546 gen_address_mask(dc, addr);
2547 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2548 } else {
2549 gen_exception(dc, TT_ILL_INSN);
2551 break;
2553 default:
2554 /* According to the table in the UA2011 manual, the only
2555 other asis that are valid for stfa/stdfa/stqfa are
2556 the PST* asis, which aren't currently handled. */
2557 gen_exception(dc, TT_ILL_INSN);
2558 break;
2562 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2564 DisasASI da = get_asi(dc, insn, MO_TEQ);
2565 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2566 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2568 switch (da.type) {
2569 case GET_ASI_EXCP:
2570 return;
2572 case GET_ASI_DTWINX:
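/* Twin load: 16 bytes into an even/odd register pair.  MO_ALIGN_16 on
   the first access enforces the required 16-byte alignment for both
   halves. */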
2573 gen_address_mask(dc, addr);
2574 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2575 tcg_gen_addi_tl(addr, addr, 8);
2576 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2577 break;
2579 case GET_ASI_DIRECT:
2581 TCGv_i64 tmp = tcg_temp_new_i64();
2583 gen_address_mask(dc, addr);
2584 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
2586 /* Note that LE ldda acts as if each 32-bit register
2587 result is byte swapped. Having just performed one
2588 64-bit bswap, we now need to swap the writebacks. */
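/* Worked example: memory at addr holds bytes 01 02 03 04 05 06 07 08.
   Big-endian ldda leaves tmp = 0x0102030405060708 and rd must get
   0x01020304, i.e. the high half.  Little-endian ldda must give
   rd = 0x04030201 and rd+1 = 0x08070605; the single 64-bit LE load
   leaves tmp = 0x0807060504030201, so there rd takes the low half. */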
2589 if ((da.memop & MO_BSWAP) == MO_TE) {
2590 tcg_gen_extr32_i64(lo, hi, tmp);
2591 } else {
2592 tcg_gen_extr32_i64(hi, lo, tmp);
2594 tcg_temp_free_i64(tmp);
2596 break;
2598 default:
2600 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2602 save_state(dc);
2603 gen_helper_ldda_asi(cpu_env, addr, r_asi);
2604 tcg_temp_free_i32(r_asi);
2606 tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUSPARCState, qt0.high));
2607 tcg_gen_ld_i64(lo, cpu_env, offsetof(CPUSPARCState, qt0.low));
2609 break;
2612 gen_store_gpr(dc, rd, hi);
2613 gen_store_gpr(dc, rd + 1, lo);
2616 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2617 int insn, int rd)
2619 DisasASI da = get_asi(dc, insn, MO_TEQ);
2620 TCGv lo = gen_load_gpr(dc, rd + 1);
2622 switch (da.type) {
2623 case GET_ASI_EXCP:
2624 break;
2626 case GET_ASI_DTWINX:
2627 gen_address_mask(dc, addr);
2628 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2629 tcg_gen_addi_tl(addr, addr, 8);
2630 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2631 break;
2633 case GET_ASI_DIRECT:
2635 TCGv_i64 t64 = tcg_temp_new_i64();
2637 /* Note that LE stda acts as if each 32-bit register result is
2638 byte swapped. We will perform one 64-bit LE store, so now
2639 we must swap the order of the construction. */
2640 if ((da.memop & MO_BSWAP) == MO_TE) {
2641 tcg_gen_concat32_i64(t64, lo, hi);
2642 } else {
2643 tcg_gen_concat32_i64(t64, hi, lo);
2645 gen_address_mask(dc, addr);
2646 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2647 tcg_temp_free_i64(t64);
2649 break;
2651 default:
2653 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2654 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2655 TCGv_i64 t64;
2657 save_state(dc);
2659 t64 = tcg_temp_new_i64();
2660 tcg_gen_concat_tl_i64(t64, lo, hi);
2661 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2662 tcg_temp_free_i32(r_mop);
2663 tcg_temp_free_i32(r_asi);
2664 tcg_temp_free_i64(t64);
2666 break;
2670 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv val2,
2671 int insn, int rd)
2673 DisasASI da = get_asi(dc, insn, MO_TEQ);
2674 TCGv val1 = gen_load_gpr(dc, rd);
2675 TCGv dst = gen_dest_gpr(dc, rd);
2676 TCGv_i32 r_asi;
2678 if (da.type == GET_ASI_EXCP) {
2679 return;
2682 save_state(dc);
2683 r_asi = tcg_const_i32(da.asi);
2684 gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
2685 tcg_temp_free_i32(r_asi);
2686 gen_store_gpr(dc, rd, dst);
2689 #elif !defined(CONFIG_USER_ONLY)
2690 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2692 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2693 whereby "rd + 1" elicits "error: array subscript is above array".
2694 Since we have already asserted that rd is even, the semantics
2695 are unchanged. */
2696 TCGv lo = gen_dest_gpr(dc, rd | 1);
2697 TCGv hi = gen_dest_gpr(dc, rd);
2698 TCGv_i64 t64 = tcg_temp_new_i64();
2699 DisasASI da = get_asi(dc, insn, MO_TEQ);
2701 switch (da.type) {
2702 case GET_ASI_EXCP:
2703 tcg_temp_free_i64(t64);
2704 return;
2705 case GET_ASI_DIRECT:
2706 gen_address_mask(dc, addr);
2707 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
2708 break;
2709 default:
2711 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2712 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2714 save_state(dc);
2715 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2716 tcg_temp_free_i32(r_mop);
2717 tcg_temp_free_i32(r_asi);
2719 break;
2722 tcg_gen_extr_i64_i32(lo, hi, t64);
2723 tcg_temp_free_i64(t64);
2724 gen_store_gpr(dc, rd | 1, lo);
2725 gen_store_gpr(dc, rd, hi);
2728 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2729 int insn, int rd)
2731 DisasASI da = get_asi(dc, insn, MO_TEQ);
2732 TCGv lo = gen_load_gpr(dc, rd + 1);
2733 TCGv_i64 t64 = tcg_temp_new_i64();
2735 tcg_gen_concat_tl_i64(t64, lo, hi);
2737 switch (da.type) {
2738 case GET_ASI_EXCP:
2739 break;
2740 case GET_ASI_DIRECT:
2741 gen_address_mask(dc, addr);
2742 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2743 break;
2744 default:
2746 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2747 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2749 save_state(dc);
2750 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2751 tcg_temp_free_i32(r_mop);
2752 tcg_temp_free_i32(r_asi);
2754 break;
2757 tcg_temp_free_i64(t64);
2759 #endif
2761 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2763 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2764 return gen_load_gpr(dc, rs1);
2767 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2769 if (IS_IMM) { /* immediate */
2770 target_long simm = GET_FIELDs(insn, 19, 31);
2771 TCGv t = get_temp_tl(dc);
2772 tcg_gen_movi_tl(t, simm);
2773 return t;
2774 } else { /* register */
2775 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2776 return gen_load_gpr(dc, rs2);
2780 #ifdef TARGET_SPARC64
2781 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2783 TCGv_i32 c32, zero, dst, s1, s2;
2785 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2786 or fold the comparison down to 32 bits and use movcond_i32. Choose
2787 the latter. */
2788 c32 = tcg_temp_new_i32();
2789 if (cmp->is_bool) {
2790 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2791 } else {
2792 TCGv_i64 c64 = tcg_temp_new_i64();
2793 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2794 tcg_gen_extrl_i64_i32(c32, c64);
2795 tcg_temp_free_i64(c64);
2798 s1 = gen_load_fpr_F(dc, rs);
2799 s2 = gen_load_fpr_F(dc, rd);
2800 dst = gen_dest_fpr_F(dc);
2801 zero = tcg_const_i32(0);
2803 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2805 tcg_temp_free_i32(c32);
2806 tcg_temp_free_i32(zero);
2807 gen_store_fpr_F(dc, rd, dst);
2810 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2812 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2813 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2814 gen_load_fpr_D(dc, rs),
2815 gen_load_fpr_D(dc, rd));
2816 gen_store_fpr_D(dc, rd, dst);
2819 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2821 int qd = QFPREG(rd);
2822 int qs = QFPREG(rs);
2824 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2825 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2826 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2827 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2829 gen_update_fprs_dirty(dc, qd);
2832 #ifndef CONFIG_USER_ONLY
2833 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2835 TCGv_i32 r_tl = tcg_temp_new_i32();
2837 /* load env->tl into r_tl */
2838 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2840 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be 2^n - 1 */
2841 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2843 /* calculate offset to current trap state from env->ts, reuse r_tl */
2844 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2845 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2847 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2849 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2850 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2851 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2852 tcg_temp_free_ptr(r_tl_tmp);
2855 tcg_temp_free_i32(r_tl);
2857 #endif
2859 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2860 int width, bool cc, bool left)
2862 TCGv lo1, lo2, t1, t2;
2863 uint64_t amask, tabl, tabr;
2864 int shift, imask, omask;
2866 if (cc) {
2867 tcg_gen_mov_tl(cpu_cc_src, s1);
2868 tcg_gen_mov_tl(cpu_cc_src2, s2);
2869 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2870 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2871 dc->cc_op = CC_OP_SUB;
2874 /* Theory of operation: there are two tables, left and right (not to
2875 be confused with the left and right versions of the opcode). These
2876 are indexed by the low 3 bits of the inputs. To make things "easy",
2877 these tables are loaded into two constants, TABL and TABR below.
2878 The operation index = (input & imask) << shift calculates the index
2879 into the constant, while val = (table >> index) & omask calculates
2880 the value we're looking for. */
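/* Worked example (width 8, the !left tables, as used by edge8cc in
   the VIS dispatch below): (s1 & 7) == 3 gives index = 3 << 3 = 24,
   and (0x0103070f1f3f7fffULL >> 24) & 0xff == 0x1f, the left-edge
   mask for a start address 3 bytes into its 8-byte group. */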
2881 switch (width) {
2882 case 8:
2883 imask = 0x7;
2884 shift = 3;
2885 omask = 0xff;
2886 if (left) {
2887 tabl = 0x80c0e0f0f8fcfeffULL;
2888 tabr = 0xff7f3f1f0f070301ULL;
2889 } else {
2890 tabl = 0x0103070f1f3f7fffULL;
2891 tabr = 0xfffefcf8f0e0c080ULL;
2893 break;
2894 case 16:
2895 imask = 0x6;
2896 shift = 1;
2897 omask = 0xf;
2898 if (left) {
2899 tabl = 0x8cef;
2900 tabr = 0xf731;
2901 } else {
2902 tabl = 0x137f;
2903 tabr = 0xfec8;
2905 break;
2906 case 32:
2907 imask = 0x4;
2908 shift = 0;
2909 omask = 0x3;
2910 if (left) {
2911 tabl = (2 << 2) | 3;
2912 tabr = (3 << 2) | 1;
2913 } else {
2914 tabl = (1 << 2) | 3;
2915 tabr = (3 << 2) | 2;
2917 break;
2918 default:
2919 abort();
2922 lo1 = tcg_temp_new();
2923 lo2 = tcg_temp_new();
2924 tcg_gen_andi_tl(lo1, s1, imask);
2925 tcg_gen_andi_tl(lo2, s2, imask);
2926 tcg_gen_shli_tl(lo1, lo1, shift);
2927 tcg_gen_shli_tl(lo2, lo2, shift);
2929 t1 = tcg_const_tl(tabl);
2930 t2 = tcg_const_tl(tabr);
2931 tcg_gen_shr_tl(lo1, t1, lo1);
2932 tcg_gen_shr_tl(lo2, t2, lo2);
2933 tcg_gen_andi_tl(dst, lo1, omask);
2934 tcg_gen_andi_tl(lo2, lo2, omask);
2936 amask = -8;
2937 if (AM_CHECK(dc)) {
2938 amask &= 0xffffffffULL;
2940 tcg_gen_andi_tl(s1, s1, amask);
2941 tcg_gen_andi_tl(s2, s2, amask);
2943 /* We want to compute
2944 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2945 We've already done dst = lo1, so this reduces to
2946 dst &= (s1 == s2 ? -1 : lo2)
2947 Which we perform by
2948 lo2 |= -(s1 == s2)
2949 dst &= lo2
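   Checking both cases: if s1 == s2 then t1 = 1 and lo2 |= -1, so the
   final AND leaves dst = lo1; otherwise lo2 is unchanged and
   dst = lo1 & lo2, as required.  */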
2951 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2952 tcg_gen_neg_tl(t1, t1);
2953 tcg_gen_or_tl(lo2, lo2, t1);
2954 tcg_gen_and_tl(dst, dst, lo2);
2956 tcg_temp_free(lo1);
2957 tcg_temp_free(lo2);
2958 tcg_temp_free(t1);
2959 tcg_temp_free(t2);
2962 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2964 TCGv tmp = tcg_temp_new();
2966 tcg_gen_add_tl(tmp, s1, s2);
2967 tcg_gen_andi_tl(dst, tmp, -8);
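/* The deposit below places the low 3 bits of the sum in GSR.align;
   for the "left" variant it is their two's complement instead,
   presumably matching the VIS ALIGNADDRESS_LITTLE definition. */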
2968 if (left) {
2969 tcg_gen_neg_tl(tmp, tmp);
2971 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2973 tcg_temp_free(tmp);
2976 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2978 TCGv t1, t2, shift;
2980 t1 = tcg_temp_new();
2981 t2 = tcg_temp_new();
2982 shift = tcg_temp_new();
2984 tcg_gen_andi_tl(shift, gsr, 7);
2985 tcg_gen_shli_tl(shift, shift, 3);
2986 tcg_gen_shl_tl(t1, s1, shift);
2988 /* A shift count of 64 is undefined in TCG and need not produce 0. Split
2989 it into a variable shift of up to 63 followed by a constant shift of 1. */
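/* Here shift is a multiple of 8 in [0, 56], so shift ^ 63 == 63 - shift
   and (s2 >> (63 - shift)) >> 1 == s2 >> (64 - shift); for shift == 0
   this correctly yields 0, which a single shift by 64 would not. */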
2990 tcg_gen_xori_tl(shift, shift, 63);
2991 tcg_gen_shr_tl(t2, s2, shift);
2992 tcg_gen_shri_tl(t2, t2, 1);
2994 tcg_gen_or_tl(dst, t1, t2);
2996 tcg_temp_free(t1);
2997 tcg_temp_free(t2);
2998 tcg_temp_free(shift);
3000 #endif
3002 #define CHECK_IU_FEATURE(dc, FEATURE) \
3003 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3004 goto illegal_insn;
3005 #define CHECK_FPU_FEATURE(dc, FEATURE) \
3006 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3007 goto nfpu_insn;
3009 /* before an instruction, dc->pc must be static */
3010 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
3012 unsigned int opc, rs1, rs2, rd;
3013 TCGv cpu_src1, cpu_src2;
3014 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
3015 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
3016 target_long simm;
3018 opc = GET_FIELD(insn, 0, 1);
3019 rd = GET_FIELD(insn, 2, 6);
3021 switch (opc) {
3022 case 0: /* branches/sethi */
3024 unsigned int xop = GET_FIELD(insn, 7, 9);
3025 int32_t target;
3026 switch (xop) {
3027 #ifdef TARGET_SPARC64
3028 case 0x1: /* V9 BPcc */
3030 int cc;
3032 target = GET_FIELD_SP(insn, 0, 18);
3033 target = sign_extend(target, 19);
3034 target <<= 2;
3035 cc = GET_FIELD_SP(insn, 20, 21);
3036 if (cc == 0)
3037 do_branch(dc, target, insn, 0);
3038 else if (cc == 2)
3039 do_branch(dc, target, insn, 1);
3040 else
3041 goto illegal_insn;
3042 goto jmp_insn;
3044 case 0x3: /* V9 BPr */
3046 target = GET_FIELD_SP(insn, 0, 13) |
3047 (GET_FIELD_SP(insn, 20, 21) << 14);
3048 target = sign_extend(target, 16);
3049 target <<= 2;
3050 cpu_src1 = get_src1(dc, insn);
3051 do_branch_reg(dc, target, insn, cpu_src1);
3052 goto jmp_insn;
3054 case 0x5: /* V9 FBPcc */
3056 int cc = GET_FIELD_SP(insn, 20, 21);
3057 if (gen_trap_ifnofpu(dc)) {
3058 goto jmp_insn;
3060 target = GET_FIELD_SP(insn, 0, 18);
3061 target = sign_extend(target, 19);
3062 target <<= 2;
3063 do_fbranch(dc, target, insn, cc);
3064 goto jmp_insn;
3066 #else
3067 case 0x7: /* CBN+x */
3069 goto ncp_insn;
3071 #endif
3072 case 0x2: /* BN+x */
3074 target = GET_FIELD(insn, 10, 31);
3075 target = sign_extend(target, 22);
3076 target <<= 2;
3077 do_branch(dc, target, insn, 0);
3078 goto jmp_insn;
3080 case 0x6: /* FBN+x */
3082 if (gen_trap_ifnofpu(dc)) {
3083 goto jmp_insn;
3085 target = GET_FIELD(insn, 10, 31);
3086 target = sign_extend(target, 22);
3087 target <<= 2;
3088 do_fbranch(dc, target, insn, 0);
3089 goto jmp_insn;
3091 case 0x4: /* SETHI */
3092 /* Special-case %g0 because that's the canonical nop. */
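/* "sethi 0, %g0" is encoded as 0x01000000 and is the canonical nop;
   with rd == 0 the write would go to %g0, so no code is emitted. */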
3093 if (rd) {
3094 uint32_t value = GET_FIELD(insn, 10, 31);
3095 TCGv t = gen_dest_gpr(dc, rd);
3096 tcg_gen_movi_tl(t, value << 10);
3097 gen_store_gpr(dc, rd, t);
3099 break;
3100 case 0x0: /* UNIMPL */
3101 default:
3102 goto illegal_insn;
3104 break;
3106 break;
3107 case 1: /*CALL*/
3109 target_long target = GET_FIELDs(insn, 2, 31) << 2;
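/* CALL: bits 29:0 hold a sign-extended 30-bit word displacement,
   scaled by 4 and made PC-relative below; %o7 (r15) receives the
   address of the call instruction itself. */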
3110 TCGv o7 = gen_dest_gpr(dc, 15);
3112 tcg_gen_movi_tl(o7, dc->pc);
3113 gen_store_gpr(dc, 15, o7);
3114 target += dc->pc;
3115 gen_mov_pc_npc(dc);
3116 #ifdef TARGET_SPARC64
3117 if (unlikely(AM_CHECK(dc))) {
3118 target &= 0xffffffffULL;
3120 #endif
3121 dc->npc = target;
3123 goto jmp_insn;
3124 case 2: /* FPU & Logical Operations */
3126 unsigned int xop = GET_FIELD(insn, 7, 12);
3127 TCGv cpu_dst = get_temp_tl(dc);
3128 TCGv cpu_tmp0;
3130 if (xop == 0x3a) { /* generate trap */
3131 int cond = GET_FIELD(insn, 3, 6);
3132 TCGv_i32 trap;
3133 TCGLabel *l1 = NULL;
3134 int mask;
3136 if (cond == 0) {
3137 /* Trap never. */
3138 break;
3141 save_state(dc);
3143 if (cond != 8) {
3144 /* Conditional trap. */
3145 DisasCompare cmp;
3146 #ifdef TARGET_SPARC64
3147 /* V9 icc/xcc */
3148 int cc = GET_FIELD_SP(insn, 11, 12);
3149 if (cc == 0) {
3150 gen_compare(&cmp, 0, cond, dc);
3151 } else if (cc == 2) {
3152 gen_compare(&cmp, 1, cond, dc);
3153 } else {
3154 goto illegal_insn;
3156 #else
3157 gen_compare(&cmp, 0, cond, dc);
3158 #endif
3159 l1 = gen_new_label();
3160 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3161 cmp.c1, cmp.c2, l1);
3162 free_compare(&cmp);
3165 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3166 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
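/* The trap vector becomes TT_TRAP + (operand & mask): 7 bits (0x7f)
   of software trap number on V8, widened to 8 bits (0xff) when the
   UA2005 hypervisor extensions are present and we are in supervisor
   mode. */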
3168 /* Don't use the normal temporaries, as they may well have
3169 gone out of scope with the branch above. While we're
3170 doing that we might as well pre-truncate to 32-bit. */
3171 trap = tcg_temp_new_i32();
3173 rs1 = GET_FIELD_SP(insn, 14, 18);
3174 if (IS_IMM) {
3175 rs2 = GET_FIELD_SP(insn, 0, 6);
3176 if (rs1 == 0) {
3177 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3178 /* Signal that the trap value is fully constant. */
3179 mask = 0;
3180 } else {
3181 TCGv t1 = gen_load_gpr(dc, rs1);
3182 tcg_gen_trunc_tl_i32(trap, t1);
3183 tcg_gen_addi_i32(trap, trap, rs2);
3185 } else {
3186 TCGv t1, t2;
3187 rs2 = GET_FIELD_SP(insn, 0, 4);
3188 t1 = gen_load_gpr(dc, rs1);
3189 t2 = gen_load_gpr(dc, rs2);
3190 tcg_gen_add_tl(t1, t1, t2);
3191 tcg_gen_trunc_tl_i32(trap, t1);
3193 if (mask != 0) {
3194 tcg_gen_andi_i32(trap, trap, mask);
3195 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3198 gen_helper_raise_exception(cpu_env, trap);
3199 tcg_temp_free_i32(trap);
3201 if (cond == 8) {
3202 /* An unconditional trap ends the TB. */
3203 dc->is_br = 1;
3204 goto jmp_insn;
3205 } else {
3206 /* A conditional trap falls through to the next insn. */
3207 gen_set_label(l1);
3208 break;
3210 } else if (xop == 0x28) {
3211 rs1 = GET_FIELD(insn, 13, 17);
3212 switch(rs1) {
3213 case 0: /* rdy */
3214 #ifndef TARGET_SPARC64
3215 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3216 manual, rdy on the microSPARC
3217 II */
3218 case 0x0f: /* stbar in the SPARCv8 manual,
3219 rdy on the microSPARC II */
3220 case 0x10 ... 0x1f: /* implementation-dependent in the
3221 SPARCv8 manual, rdy on the
3222 microSPARC II */
3223 /* Read Asr17 */
3224 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3225 TCGv t = gen_dest_gpr(dc, rd);
3226 /* Read Asr17 for a Leon3 monoprocessor */
3227 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3228 gen_store_gpr(dc, rd, t);
3229 break;
3231 #endif
3232 gen_store_gpr(dc, rd, cpu_y);
3233 break;
3234 #ifdef TARGET_SPARC64
3235 case 0x2: /* V9 rdccr */
3236 update_psr(dc);
3237 gen_helper_rdccr(cpu_dst, cpu_env);
3238 gen_store_gpr(dc, rd, cpu_dst);
3239 break;
3240 case 0x3: /* V9 rdasi */
3241 tcg_gen_movi_tl(cpu_dst, dc->asi);
3242 gen_store_gpr(dc, rd, cpu_dst);
3243 break;
3244 case 0x4: /* V9 rdtick */
3246 TCGv_ptr r_tickptr;
3247 TCGv_i32 r_const;
3249 r_tickptr = tcg_temp_new_ptr();
3250 r_const = tcg_const_i32(dc->mem_idx);
3251 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3252 offsetof(CPUSPARCState, tick));
3253 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3254 r_const);
3255 tcg_temp_free_ptr(r_tickptr);
3256 tcg_temp_free_i32(r_const);
3257 gen_store_gpr(dc, rd, cpu_dst);
3259 break;
3260 case 0x5: /* V9 rdpc */
3262 TCGv t = gen_dest_gpr(dc, rd);
3263 if (unlikely(AM_CHECK(dc))) {
3264 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3265 } else {
3266 tcg_gen_movi_tl(t, dc->pc);
3268 gen_store_gpr(dc, rd, t);
3270 break;
3271 case 0x6: /* V9 rdfprs */
3272 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3273 gen_store_gpr(dc, rd, cpu_dst);
3274 break;
3275 case 0xf: /* V9 membar */
3276 break; /* no effect */
3277 case 0x13: /* Graphics Status */
3278 if (gen_trap_ifnofpu(dc)) {
3279 goto jmp_insn;
3281 gen_store_gpr(dc, rd, cpu_gsr);
3282 break;
3283 case 0x16: /* Softint */
3284 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3285 offsetof(CPUSPARCState, softint));
3286 gen_store_gpr(dc, rd, cpu_dst);
3287 break;
3288 case 0x17: /* Tick compare */
3289 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3290 break;
3291 case 0x18: /* System tick */
3293 TCGv_ptr r_tickptr;
3294 TCGv_i32 r_const;
3296 r_tickptr = tcg_temp_new_ptr();
3297 r_const = tcg_const_i32(dc->mem_idx);
3298 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3299 offsetof(CPUSPARCState, stick));
3300 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3301 r_const);
3302 tcg_temp_free_ptr(r_tickptr);
3303 tcg_temp_free_i32(r_const);
3304 gen_store_gpr(dc, rd, cpu_dst);
3306 break;
3307 case 0x19: /* System tick compare */
3308 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3309 break;
3310 case 0x10: /* Performance Control */
3311 case 0x11: /* Performance Instrumentation Counter */
3312 case 0x12: /* Dispatch Control */
3313 case 0x14: /* Softint set, WO */
3314 case 0x15: /* Softint clear, WO */
3315 #endif
3316 default:
3317 goto illegal_insn;
3319 #if !defined(CONFIG_USER_ONLY)
3320 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3321 #ifndef TARGET_SPARC64
3322 if (!supervisor(dc)) {
3323 goto priv_insn;
3325 update_psr(dc);
3326 gen_helper_rdpsr(cpu_dst, cpu_env);
3327 #else
3328 CHECK_IU_FEATURE(dc, HYPV);
3329 if (!hypervisor(dc))
3330 goto priv_insn;
3331 rs1 = GET_FIELD(insn, 13, 17);
3332 switch (rs1) {
3333 case 0: // hpstate
3334 // gen_op_rdhpstate();
3335 break;
3336 case 1: // htstate
3337 // gen_op_rdhtstate();
3338 break;
3339 case 3: // hintp
3340 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3341 break;
3342 case 5: // htba
3343 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3344 break;
3345 case 6: // hver
3346 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3347 break;
3348 case 31: // hstick_cmpr
3349 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3350 break;
3351 default:
3352 goto illegal_insn;
3354 #endif
3355 gen_store_gpr(dc, rd, cpu_dst);
3356 break;
3357 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3358 if (!supervisor(dc)) {
3359 goto priv_insn;
3361 cpu_tmp0 = get_temp_tl(dc);
3362 #ifdef TARGET_SPARC64
3363 rs1 = GET_FIELD(insn, 13, 17);
3364 switch (rs1) {
3365 case 0: // tpc
3367 TCGv_ptr r_tsptr;
3369 r_tsptr = tcg_temp_new_ptr();
3370 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3371 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3372 offsetof(trap_state, tpc));
3373 tcg_temp_free_ptr(r_tsptr);
3375 break;
3376 case 1: // tnpc
3378 TCGv_ptr r_tsptr;
3380 r_tsptr = tcg_temp_new_ptr();
3381 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3382 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3383 offsetof(trap_state, tnpc));
3384 tcg_temp_free_ptr(r_tsptr);
3386 break;
3387 case 2: // tstate
3389 TCGv_ptr r_tsptr;
3391 r_tsptr = tcg_temp_new_ptr();
3392 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3393 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3394 offsetof(trap_state, tstate));
3395 tcg_temp_free_ptr(r_tsptr);
3397 break;
3398 case 3: // tt
3400 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3402 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3403 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3404 offsetof(trap_state, tt));
3405 tcg_temp_free_ptr(r_tsptr);
3407 break;
3408 case 4: // tick
3410 TCGv_ptr r_tickptr;
3411 TCGv_i32 r_const;
3413 r_tickptr = tcg_temp_new_ptr();
3414 r_const = tcg_const_i32(dc->mem_idx);
3415 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3416 offsetof(CPUSPARCState, tick));
3417 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3418 r_tickptr, r_const);
3419 tcg_temp_free_ptr(r_tickptr);
3420 tcg_temp_free_i32(r_const);
3422 break;
3423 case 5: // tba
3424 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3425 break;
3426 case 6: // pstate
3427 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3428 offsetof(CPUSPARCState, pstate));
3429 break;
3430 case 7: // tl
3431 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3432 offsetof(CPUSPARCState, tl));
3433 break;
3434 case 8: // pil
3435 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3436 offsetof(CPUSPARCState, psrpil));
3437 break;
3438 case 9: // cwp
3439 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3440 break;
3441 case 10: // cansave
3442 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3443 offsetof(CPUSPARCState, cansave));
3444 break;
3445 case 11: // canrestore
3446 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3447 offsetof(CPUSPARCState, canrestore));
3448 break;
3449 case 12: // cleanwin
3450 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3451 offsetof(CPUSPARCState, cleanwin));
3452 break;
3453 case 13: // otherwin
3454 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3455 offsetof(CPUSPARCState, otherwin));
3456 break;
3457 case 14: // wstate
3458 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3459 offsetof(CPUSPARCState, wstate));
3460 break;
3461 case 16: // UA2005 gl
3462 CHECK_IU_FEATURE(dc, GL);
3463 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3464 offsetof(CPUSPARCState, gl));
3465 break;
3466 case 26: // UA2005 strand status
3467 CHECK_IU_FEATURE(dc, HYPV);
3468 if (!hypervisor(dc))
3469 goto priv_insn;
3470 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3471 break;
3472 case 31: // ver
3473 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3474 break;
3475 case 15: // fq
3476 default:
3477 goto illegal_insn;
3479 #else
3480 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3481 #endif
3482 gen_store_gpr(dc, rd, cpu_tmp0);
3483 break;
3484 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3485 #ifdef TARGET_SPARC64
3486 gen_helper_flushw(cpu_env);
3487 #else
3488 if (!supervisor(dc))
3489 goto priv_insn;
3490 gen_store_gpr(dc, rd, cpu_tbr);
3491 #endif
3492 break;
3493 #endif
3494 } else if (xop == 0x34) { /* FPU Operations */
3495 if (gen_trap_ifnofpu(dc)) {
3496 goto jmp_insn;
3498 gen_op_clear_ieee_excp_and_FTT();
3499 rs1 = GET_FIELD(insn, 13, 17);
3500 rs2 = GET_FIELD(insn, 27, 31);
3501 xop = GET_FIELD(insn, 18, 26);
3503 switch (xop) {
3504 case 0x1: /* fmovs */
3505 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3506 gen_store_fpr_F(dc, rd, cpu_src1_32);
3507 break;
3508 case 0x5: /* fnegs */
3509 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3510 break;
3511 case 0x9: /* fabss */
3512 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3513 break;
3514 case 0x29: /* fsqrts */
3515 CHECK_FPU_FEATURE(dc, FSQRT);
3516 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3517 break;
3518 case 0x2a: /* fsqrtd */
3519 CHECK_FPU_FEATURE(dc, FSQRT);
3520 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3521 break;
3522 case 0x2b: /* fsqrtq */
3523 CHECK_FPU_FEATURE(dc, FLOAT128);
3524 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3525 break;
3526 case 0x41: /* fadds */
3527 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3528 break;
3529 case 0x42: /* faddd */
3530 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3531 break;
3532 case 0x43: /* faddq */
3533 CHECK_FPU_FEATURE(dc, FLOAT128);
3534 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3535 break;
3536 case 0x45: /* fsubs */
3537 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3538 break;
3539 case 0x46: /* fsubd */
3540 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3541 break;
3542 case 0x47: /* fsubq */
3543 CHECK_FPU_FEATURE(dc, FLOAT128);
3544 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3545 break;
3546 case 0x49: /* fmuls */
3547 CHECK_FPU_FEATURE(dc, FMUL);
3548 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3549 break;
3550 case 0x4a: /* fmuld */
3551 CHECK_FPU_FEATURE(dc, FMUL);
3552 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3553 break;
3554 case 0x4b: /* fmulq */
3555 CHECK_FPU_FEATURE(dc, FLOAT128);
3556 CHECK_FPU_FEATURE(dc, FMUL);
3557 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3558 break;
3559 case 0x4d: /* fdivs */
3560 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3561 break;
3562 case 0x4e: /* fdivd */
3563 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3564 break;
3565 case 0x4f: /* fdivq */
3566 CHECK_FPU_FEATURE(dc, FLOAT128);
3567 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3568 break;
3569 case 0x69: /* fsmuld */
3570 CHECK_FPU_FEATURE(dc, FSMULD);
3571 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3572 break;
3573 case 0x6e: /* fdmulq */
3574 CHECK_FPU_FEATURE(dc, FLOAT128);
3575 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3576 break;
3577 case 0xc4: /* fitos */
3578 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3579 break;
3580 case 0xc6: /* fdtos */
3581 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3582 break;
3583 case 0xc7: /* fqtos */
3584 CHECK_FPU_FEATURE(dc, FLOAT128);
3585 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3586 break;
3587 case 0xc8: /* fitod */
3588 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3589 break;
3590 case 0xc9: /* fstod */
3591 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3592 break;
3593 case 0xcb: /* fqtod */
3594 CHECK_FPU_FEATURE(dc, FLOAT128);
3595 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3596 break;
3597 case 0xcc: /* fitoq */
3598 CHECK_FPU_FEATURE(dc, FLOAT128);
3599 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3600 break;
3601 case 0xcd: /* fstoq */
3602 CHECK_FPU_FEATURE(dc, FLOAT128);
3603 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3604 break;
3605 case 0xce: /* fdtoq */
3606 CHECK_FPU_FEATURE(dc, FLOAT128);
3607 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3608 break;
3609 case 0xd1: /* fstoi */
3610 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3611 break;
3612 case 0xd2: /* fdtoi */
3613 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3614 break;
3615 case 0xd3: /* fqtoi */
3616 CHECK_FPU_FEATURE(dc, FLOAT128);
3617 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3618 break;
3619 #ifdef TARGET_SPARC64
3620 case 0x2: /* V9 fmovd */
3621 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3622 gen_store_fpr_D(dc, rd, cpu_src1_64);
3623 break;
3624 case 0x3: /* V9 fmovq */
3625 CHECK_FPU_FEATURE(dc, FLOAT128);
3626 gen_move_Q(dc, rd, rs2);
3627 break;
3628 case 0x6: /* V9 fnegd */
3629 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3630 break;
3631 case 0x7: /* V9 fnegq */
3632 CHECK_FPU_FEATURE(dc, FLOAT128);
3633 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3634 break;
3635 case 0xa: /* V9 fabsd */
3636 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3637 break;
3638 case 0xb: /* V9 fabsq */
3639 CHECK_FPU_FEATURE(dc, FLOAT128);
3640 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3641 break;
3642 case 0x81: /* V9 fstox */
3643 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3644 break;
3645 case 0x82: /* V9 fdtox */
3646 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3647 break;
3648 case 0x83: /* V9 fqtox */
3649 CHECK_FPU_FEATURE(dc, FLOAT128);
3650 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3651 break;
3652 case 0x84: /* V9 fxtos */
3653 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3654 break;
3655 case 0x88: /* V9 fxtod */
3656 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3657 break;
3658 case 0x8c: /* V9 fxtoq */
3659 CHECK_FPU_FEATURE(dc, FLOAT128);
3660 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3661 break;
3662 #endif
3663 default:
3664 goto illegal_insn;
3666 } else if (xop == 0x35) { /* FPU Operations */
3667 #ifdef TARGET_SPARC64
3668 int cond;
3669 #endif
3670 if (gen_trap_ifnofpu(dc)) {
3671 goto jmp_insn;
3673 gen_op_clear_ieee_excp_and_FTT();
3674 rs1 = GET_FIELD(insn, 13, 17);
3675 rs2 = GET_FIELD(insn, 27, 31);
3676 xop = GET_FIELD(insn, 18, 26);
3678 #ifdef TARGET_SPARC64
3679 #define FMOVR(sz) \
3680 do { \
3681 DisasCompare cmp; \
3682 cond = GET_FIELD_SP(insn, 10, 12); \
3683 cpu_src1 = get_src1(dc, insn); \
3684 gen_compare_reg(&cmp, cond, cpu_src1); \
3685 gen_fmov##sz(dc, &cmp, rd, rs2); \
3686 free_compare(&cmp); \
3687 } while (0)
3689 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3690 FMOVR(s);
3691 break;
3692 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3693 FMOVR(d);
3694 break;
3695 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3696 CHECK_FPU_FEATURE(dc, FLOAT128);
3697 FMOVR(q);
3698 break;
3700 #undef FMOVR
3701 #endif
3702 switch (xop) {
3703 #ifdef TARGET_SPARC64
3704 #define FMOVCC(fcc, sz) \
3705 do { \
3706 DisasCompare cmp; \
3707 cond = GET_FIELD_SP(insn, 14, 17); \
3708 gen_fcompare(&cmp, fcc, cond); \
3709 gen_fmov##sz(dc, &cmp, rd, rs2); \
3710 free_compare(&cmp); \
3711 } while (0)
3713 case 0x001: /* V9 fmovscc %fcc0 */
3714 FMOVCC(0, s);
3715 break;
3716 case 0x002: /* V9 fmovdcc %fcc0 */
3717 FMOVCC(0, d);
3718 break;
3719 case 0x003: /* V9 fmovqcc %fcc0 */
3720 CHECK_FPU_FEATURE(dc, FLOAT128);
3721 FMOVCC(0, q);
3722 break;
3723 case 0x041: /* V9 fmovscc %fcc1 */
3724 FMOVCC(1, s);
3725 break;
3726 case 0x042: /* V9 fmovdcc %fcc1 */
3727 FMOVCC(1, d);
3728 break;
3729 case 0x043: /* V9 fmovqcc %fcc1 */
3730 CHECK_FPU_FEATURE(dc, FLOAT128);
3731 FMOVCC(1, q);
3732 break;
3733 case 0x081: /* V9 fmovscc %fcc2 */
3734 FMOVCC(2, s);
3735 break;
3736 case 0x082: /* V9 fmovdcc %fcc2 */
3737 FMOVCC(2, d);
3738 break;
3739 case 0x083: /* V9 fmovqcc %fcc2 */
3740 CHECK_FPU_FEATURE(dc, FLOAT128);
3741 FMOVCC(2, q);
3742 break;
3743 case 0x0c1: /* V9 fmovscc %fcc3 */
3744 FMOVCC(3, s);
3745 break;
3746 case 0x0c2: /* V9 fmovdcc %fcc3 */
3747 FMOVCC(3, d);
3748 break;
3749 case 0x0c3: /* V9 fmovqcc %fcc3 */
3750 CHECK_FPU_FEATURE(dc, FLOAT128);
3751 FMOVCC(3, q);
3752 break;
3753 #undef FMOVCC
3754 #define FMOVCC(xcc, sz) \
3755 do { \
3756 DisasCompare cmp; \
3757 cond = GET_FIELD_SP(insn, 14, 17); \
3758 gen_compare(&cmp, xcc, cond, dc); \
3759 gen_fmov##sz(dc, &cmp, rd, rs2); \
3760 free_compare(&cmp); \
3761 } while (0)
3763 case 0x101: /* V9 fmovscc %icc */
3764 FMOVCC(0, s);
3765 break;
3766 case 0x102: /* V9 fmovdcc %icc */
3767 FMOVCC(0, d);
3768 break;
3769 case 0x103: /* V9 fmovqcc %icc */
3770 CHECK_FPU_FEATURE(dc, FLOAT128);
3771 FMOVCC(0, q);
3772 break;
3773 case 0x181: /* V9 fmovscc %xcc */
3774 FMOVCC(1, s);
3775 break;
3776 case 0x182: /* V9 fmovdcc %xcc */
3777 FMOVCC(1, d);
3778 break;
3779 case 0x183: /* V9 fmovqcc %xcc */
3780 CHECK_FPU_FEATURE(dc, FLOAT128);
3781 FMOVCC(1, q);
3782 break;
3783 #undef FMOVCC
3784 #endif
3785 case 0x51: /* fcmps, V9 %fcc */
3786 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3787 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3788 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3789 break;
3790 case 0x52: /* fcmpd, V9 %fcc */
3791 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3792 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3793 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3794 break;
3795 case 0x53: /* fcmpq, V9 %fcc */
3796 CHECK_FPU_FEATURE(dc, FLOAT128);
3797 gen_op_load_fpr_QT0(QFPREG(rs1));
3798 gen_op_load_fpr_QT1(QFPREG(rs2));
3799 gen_op_fcmpq(rd & 3);
3800 break;
3801 case 0x55: /* fcmpes, V9 %fcc */
3802 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3803 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3804 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3805 break;
3806 case 0x56: /* fcmped, V9 %fcc */
3807 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3808 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3809 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3810 break;
3811 case 0x57: /* fcmpeq, V9 %fcc */
3812 CHECK_FPU_FEATURE(dc, FLOAT128);
3813 gen_op_load_fpr_QT0(QFPREG(rs1));
3814 gen_op_load_fpr_QT1(QFPREG(rs2));
3815 gen_op_fcmpeq(rd & 3);
3816 break;
3817 default:
3818 goto illegal_insn;
3820 } else if (xop == 0x2) {
3821 TCGv dst = gen_dest_gpr(dc, rd);
3822 rs1 = GET_FIELD(insn, 13, 17);
3823 if (rs1 == 0) {
3824 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3825 if (IS_IMM) { /* immediate */
3826 simm = GET_FIELDs(insn, 19, 31);
3827 tcg_gen_movi_tl(dst, simm);
3828 gen_store_gpr(dc, rd, dst);
3829 } else { /* register */
3830 rs2 = GET_FIELD(insn, 27, 31);
3831 if (rs2 == 0) {
3832 tcg_gen_movi_tl(dst, 0);
3833 gen_store_gpr(dc, rd, dst);
3834 } else {
3835 cpu_src2 = gen_load_gpr(dc, rs2);
3836 gen_store_gpr(dc, rd, cpu_src2);
3839 } else {
3840 cpu_src1 = get_src1(dc, insn);
3841 if (IS_IMM) { /* immediate */
3842 simm = GET_FIELDs(insn, 19, 31);
3843 tcg_gen_ori_tl(dst, cpu_src1, simm);
3844 gen_store_gpr(dc, rd, dst);
3845 } else { /* register */
3846 rs2 = GET_FIELD(insn, 27, 31);
3847 if (rs2 == 0) {
3848 /* mov shortcut: or x, %g0, y -> mov x, y */
3849 gen_store_gpr(dc, rd, cpu_src1);
3850 } else {
3851 cpu_src2 = gen_load_gpr(dc, rs2);
3852 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3853 gen_store_gpr(dc, rd, dst);
3857 #ifdef TARGET_SPARC64
3858 } else if (xop == 0x25) { /* sll, V9 sllx */
3859 cpu_src1 = get_src1(dc, insn);
3860 if (IS_IMM) { /* immediate */
3861 simm = GET_FIELDs(insn, 20, 31);
3862 if (insn & (1 << 12)) {
3863 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3864 } else {
3865 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3867 } else { /* register */
3868 rs2 = GET_FIELD(insn, 27, 31);
3869 cpu_src2 = gen_load_gpr(dc, rs2);
3870 cpu_tmp0 = get_temp_tl(dc);
3871 if (insn & (1 << 12)) {
3872 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3873 } else {
3874 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3876 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3878 gen_store_gpr(dc, rd, cpu_dst);
3879 } else if (xop == 0x26) { /* srl, V9 srlx */
3880 cpu_src1 = get_src1(dc, insn);
3881 if (IS_IMM) { /* immediate */
3882 simm = GET_FIELDs(insn, 20, 31);
3883 if (insn & (1 << 12)) {
3884 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3885 } else {
3886 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3887 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3889 } else { /* register */
3890 rs2 = GET_FIELD(insn, 27, 31);
3891 cpu_src2 = gen_load_gpr(dc, rs2);
3892 cpu_tmp0 = get_temp_tl(dc);
3893 if (insn & (1 << 12)) {
3894 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3895 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3896 } else {
3897 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3898 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3899 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3902 gen_store_gpr(dc, rd, cpu_dst);
3903 } else if (xop == 0x27) { /* sra, V9 srax */
3904 cpu_src1 = get_src1(dc, insn);
3905 if (IS_IMM) { /* immediate */
3906 simm = GET_FIELDs(insn, 20, 31);
3907 if (insn & (1 << 12)) {
3908 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3909 } else {
3910 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3911 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3913 } else { /* register */
3914 rs2 = GET_FIELD(insn, 27, 31);
3915 cpu_src2 = gen_load_gpr(dc, rs2);
3916 cpu_tmp0 = get_temp_tl(dc);
3917 if (insn & (1 << 12)) {
3918 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3919 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3920 } else {
3921 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3922 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3923 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3926 gen_store_gpr(dc, rd, cpu_dst);
3927 #endif
3928 } else if (xop < 0x36) {
3929 if (xop < 0x20) {
3930 cpu_src1 = get_src1(dc, insn);
3931 cpu_src2 = get_src2(dc, insn);
3932 switch (xop & ~0x10) {
3933 case 0x0: /* add */
3934 if (xop & 0x10) {
3935 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3936 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3937 dc->cc_op = CC_OP_ADD;
3938 } else {
3939 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3941 break;
3942 case 0x1: /* and */
3943 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3944 if (xop & 0x10) {
3945 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3946 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3947 dc->cc_op = CC_OP_LOGIC;
3949 break;
3950 case 0x2: /* or */
3951 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3952 if (xop & 0x10) {
3953 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3954 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3955 dc->cc_op = CC_OP_LOGIC;
3957 break;
3958 case 0x3: /* xor */
3959 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3960 if (xop & 0x10) {
3961 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3962 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3963 dc->cc_op = CC_OP_LOGIC;
3965 break;
3966 case 0x4: /* sub */
3967 if (xop & 0x10) {
3968 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3969 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3970 dc->cc_op = CC_OP_SUB;
3971 } else {
3972 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3974 break;
3975 case 0x5: /* andn */
3976 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3977 if (xop & 0x10) {
3978 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3979 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3980 dc->cc_op = CC_OP_LOGIC;
3982 break;
3983 case 0x6: /* orn */
3984 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3985 if (xop & 0x10) {
3986 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3987 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3988 dc->cc_op = CC_OP_LOGIC;
3990 break;
3991 case 0x7: /* xorn */
3992 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3993 if (xop & 0x10) {
3994 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3995 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3996 dc->cc_op = CC_OP_LOGIC;
3998 break;
3999 case 0x8: /* addx, V9 addc */
4000 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4001 (xop & 0x10));
4002 break;
4003 #ifdef TARGET_SPARC64
4004 case 0x9: /* V9 mulx */
4005 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4006 break;
4007 #endif
4008 case 0xa: /* umul */
4009 CHECK_IU_FEATURE(dc, MUL);
4010 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4011 if (xop & 0x10) {
4012 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4013 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4014 dc->cc_op = CC_OP_LOGIC;
4016 break;
4017 case 0xb: /* smul */
4018 CHECK_IU_FEATURE(dc, MUL);
4019 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4020 if (xop & 0x10) {
4021 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4022 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4023 dc->cc_op = CC_OP_LOGIC;
4025 break;
4026 case 0xc: /* subx, V9 subc */
4027 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4028 (xop & 0x10));
4029 break;
4030 #ifdef TARGET_SPARC64
4031 case 0xd: /* V9 udivx */
4032 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4033 break;
4034 #endif
4035 case 0xe: /* udiv */
4036 CHECK_IU_FEATURE(dc, DIV);
4037 if (xop & 0x10) {
4038 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4039 cpu_src2);
4040 dc->cc_op = CC_OP_DIV;
4041 } else {
4042 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4043 cpu_src2);
4045 break;
4046 case 0xf: /* sdiv */
4047 CHECK_IU_FEATURE(dc, DIV);
4048 if (xop & 0x10) {
4049 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4050 cpu_src2);
4051 dc->cc_op = CC_OP_DIV;
4052 } else {
4053 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4054 cpu_src2);
4056 break;
4057 default:
4058 goto illegal_insn;
4060 gen_store_gpr(dc, rd, cpu_dst);
4061 } else {
4062 cpu_src1 = get_src1(dc, insn);
4063 cpu_src2 = get_src2(dc, insn);
4064 switch (xop) {
4065 case 0x20: /* taddcc */
4066 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4067 gen_store_gpr(dc, rd, cpu_dst);
4068 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4069 dc->cc_op = CC_OP_TADD;
4070 break;
4071 case 0x21: /* tsubcc */
4072 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4073 gen_store_gpr(dc, rd, cpu_dst);
4074 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4075 dc->cc_op = CC_OP_TSUB;
4076 break;
4077 case 0x22: /* taddcctv */
4078 gen_helper_taddcctv(cpu_dst, cpu_env,
4079 cpu_src1, cpu_src2);
4080 gen_store_gpr(dc, rd, cpu_dst);
4081 dc->cc_op = CC_OP_TADDTV;
4082 break;
4083 case 0x23: /* tsubcctv */
4084 gen_helper_tsubcctv(cpu_dst, cpu_env,
4085 cpu_src1, cpu_src2);
4086 gen_store_gpr(dc, rd, cpu_dst);
4087 dc->cc_op = CC_OP_TSUBTV;
4088 break;
4089 case 0x24: /* mulscc */
4090 update_psr(dc);
4091 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4092 gen_store_gpr(dc, rd, cpu_dst);
4093 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4094 dc->cc_op = CC_OP_ADD;
4095 break;
4096 #ifndef TARGET_SPARC64
4097 case 0x25: /* sll */
4098 if (IS_IMM) { /* immediate */
4099 simm = GET_FIELDs(insn, 20, 31);
4100 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4101 } else { /* register */
4102 cpu_tmp0 = get_temp_tl(dc);
4103 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4104 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4106 gen_store_gpr(dc, rd, cpu_dst);
4107 break;
4108 case 0x26: /* srl */
4109 if (IS_IMM) { /* immediate */
4110 simm = GET_FIELDs(insn, 20, 31);
4111 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4112 } else { /* register */
4113 cpu_tmp0 = get_temp_tl(dc);
4114 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4115 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4117 gen_store_gpr(dc, rd, cpu_dst);
4118 break;
4119 case 0x27: /* sra */
4120 if (IS_IMM) { /* immediate */
4121 simm = GET_FIELDs(insn, 20, 31);
4122 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4123 } else { /* register */
4124 cpu_tmp0 = get_temp_tl(dc);
4125 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4126 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4128 gen_store_gpr(dc, rd, cpu_dst);
4129 break;
4130 #endif
4131 case 0x30:
4133 cpu_tmp0 = get_temp_tl(dc);
4134 switch(rd) {
4135 case 0: /* wry */
4136 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4137 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4138 break;
4139 #ifndef TARGET_SPARC64
4140 case 0x01 ... 0x0f: /* undefined in the
4141 SPARCv8 manual, nop
4142 on the microSPARC
4143 II */
4144 case 0x10 ... 0x1f: /* implementation-dependent
4145 in the SPARCv8
4146 manual, nop on the
4147 microSPARC II */
4148 if ((rd == 0x13) && (dc->def->features &
4149 CPU_FEATURE_POWERDOWN)) {
4150 /* LEON3 power-down */
4151 save_state(dc);
4152 gen_helper_power_down(cpu_env);
4154 break;
4155 #else
4156 case 0x2: /* V9 wrccr */
4157 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4158 gen_helper_wrccr(cpu_env, cpu_tmp0);
4159 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4160 dc->cc_op = CC_OP_FLAGS;
4161 break;
4162 case 0x3: /* V9 wrasi */
4163 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4164 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4165 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4166 offsetof(CPUSPARCState, asi));
4167 /* End TB to notice changed ASI. */
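/* dc->asi is sampled at translation time (see the rdasi case above),
   so any code already translated into this TB would keep using the
   stale %asi value; hence the forced exit. */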
4168 save_state(dc);
4169 gen_op_next_insn();
4170 tcg_gen_exit_tb(0);
4171 dc->is_br = 1;
4172 break;
4173 case 0x6: /* V9 wrfprs */
4174 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4175 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4176 dc->fprs_dirty = 0;
4177 save_state(dc);
4178 gen_op_next_insn();
4179 tcg_gen_exit_tb(0);
4180 dc->is_br = 1;
4181 break;
4182 case 0xf: /* V9 sir, nop if user */
4183 #if !defined(CONFIG_USER_ONLY)
4184 if (supervisor(dc)) {
4185 ; // XXX
4187 #endif
4188 break;
4189 case 0x13: /* Graphics Status */
4190 if (gen_trap_ifnofpu(dc)) {
4191 goto jmp_insn;
4193 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4194 break;
4195 case 0x14: /* Softint set */
4196 if (!supervisor(dc))
4197 goto illegal_insn;
4198 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4199 gen_helper_set_softint(cpu_env, cpu_tmp0);
4200 break;
4201 case 0x15: /* Softint clear */
4202 if (!supervisor(dc))
4203 goto illegal_insn;
4204 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4205 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4206 break;
4207 case 0x16: /* Softint write */
4208 if (!supervisor(dc))
4209 goto illegal_insn;
4210 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4211 gen_helper_write_softint(cpu_env, cpu_tmp0);
4212 break;
4213 case 0x17: /* Tick compare */
4214 #if !defined(CONFIG_USER_ONLY)
4215 if (!supervisor(dc))
4216 goto illegal_insn;
4217 #endif
4219 TCGv_ptr r_tickptr;
4221 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4222 cpu_src2);
4223 r_tickptr = tcg_temp_new_ptr();
4224 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4225 offsetof(CPUSPARCState, tick));
4226 gen_helper_tick_set_limit(r_tickptr,
4227 cpu_tick_cmpr);
4228 tcg_temp_free_ptr(r_tickptr);
4230 break;
4231 case 0x18: /* System tick */
4232 #if !defined(CONFIG_USER_ONLY)
4233 if (!supervisor(dc))
4234 goto illegal_insn;
4235 #endif
4237 TCGv_ptr r_tickptr;
4239 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4240 cpu_src2);
4241 r_tickptr = tcg_temp_new_ptr();
4242 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4243 offsetof(CPUSPARCState, stick));
4244 gen_helper_tick_set_count(r_tickptr,
4245 cpu_tmp0);
4246 tcg_temp_free_ptr(r_tickptr);
4248 break;
4249 case 0x19: /* System tick compare */
4250 #if !defined(CONFIG_USER_ONLY)
4251 if (!supervisor(dc))
4252 goto illegal_insn;
4253 #endif
4255 TCGv_ptr r_tickptr;
4257 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4258 cpu_src2);
4259 r_tickptr = tcg_temp_new_ptr();
4260 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4261 offsetof(CPUSPARCState, stick));
4262 gen_helper_tick_set_limit(r_tickptr,
4263 cpu_stick_cmpr);
4264 tcg_temp_free_ptr(r_tickptr);
4266 break;
4268 case 0x10: /* Performance Control */
4269 case 0x11: /* Performance Instrumentation
4270 Counter */
4271 case 0x12: /* Dispatch Control */
4272 #endif
4273 default:
4274 goto illegal_insn;
4277 break;
4278 #if !defined(CONFIG_USER_ONLY)
4279 case 0x31: /* wrpsr, V9 saved, restored */
4281 if (!supervisor(dc))
4282 goto priv_insn;
4283 #ifdef TARGET_SPARC64
4284 switch (rd) {
4285 case 0:
4286 gen_helper_saved(cpu_env);
4287 break;
4288 case 1:
4289 gen_helper_restored(cpu_env);
4290 break;
4291 case 2: /* UA2005 allclean */
4292 case 3: /* UA2005 otherw */
4293 case 4: /* UA2005 normalw */
4294 case 5: /* UA2005 invalw */
4295 // XXX
4296 default:
4297 goto illegal_insn;
4299 #else
4300 cpu_tmp0 = get_temp_tl(dc);
4301 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4302 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4303 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4304 dc->cc_op = CC_OP_FLAGS;
4305 save_state(dc);
4306 gen_op_next_insn();
4307 tcg_gen_exit_tb(0);
4308 dc->is_br = 1;
4309 #endif
4311 break;
4312 case 0x32: /* wrwim, V9 wrpr */
4314 if (!supervisor(dc))
4315 goto priv_insn;
4316 cpu_tmp0 = get_temp_tl(dc);
4317 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4318 #ifdef TARGET_SPARC64
4319 switch (rd) {
4320 case 0: // tpc
4322 TCGv_ptr r_tsptr;
4324 r_tsptr = tcg_temp_new_ptr();
4325 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4326 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4327 offsetof(trap_state, tpc));
4328 tcg_temp_free_ptr(r_tsptr);
4330 break;
4331 case 1: // tnpc
4333 TCGv_ptr r_tsptr;
4335 r_tsptr = tcg_temp_new_ptr();
4336 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4337 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4338 offsetof(trap_state, tnpc));
4339 tcg_temp_free_ptr(r_tsptr);
4341 break;
4342 case 2: // tstate
4344 TCGv_ptr r_tsptr;
4346 r_tsptr = tcg_temp_new_ptr();
4347 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4348 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4349 offsetof(trap_state,
4350 tstate));
4351 tcg_temp_free_ptr(r_tsptr);
4353 break;
4354 case 3: // tt
4356 TCGv_ptr r_tsptr;
4358 r_tsptr = tcg_temp_new_ptr();
4359 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4360 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4361 offsetof(trap_state, tt));
4362 tcg_temp_free_ptr(r_tsptr);
4364 break;
4365 case 4: // tick
4367 TCGv_ptr r_tickptr;
4369 r_tickptr = tcg_temp_new_ptr();
4370 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4371 offsetof(CPUSPARCState, tick));
4372 gen_helper_tick_set_count(r_tickptr,
4373 cpu_tmp0);
4374 tcg_temp_free_ptr(r_tickptr);
4376 break;
4377 case 5: // tba
4378 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4379 break;
4380 case 6: // pstate
4381 save_state(dc);
4382 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4383 dc->npc = DYNAMIC_PC;
4384 break;
4385 case 7: // tl
4386 save_state(dc);
4387 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4388 offsetof(CPUSPARCState, tl));
4389 dc->npc = DYNAMIC_PC;
4390 break;
4391 case 8: // pil
4392 gen_helper_wrpil(cpu_env, cpu_tmp0);
4393 break;
4394 case 9: // cwp
4395 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4396 break;
4397 case 10: // cansave
4398 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4399 offsetof(CPUSPARCState,
4400 cansave));
4401 break;
4402 case 11: // canrestore
4403 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4404 offsetof(CPUSPARCState,
4405 canrestore));
4406 break;
4407 case 12: // cleanwin
4408 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4409 offsetof(CPUSPARCState,
4410 cleanwin));
4411 break;
4412 case 13: // otherwin
4413 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4414 offsetof(CPUSPARCState,
4415 otherwin));
4416 break;
4417 case 14: // wstate
4418 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4419 offsetof(CPUSPARCState,
4420 wstate));
4421 break;
4422 case 16: // UA2005 gl
4423 CHECK_IU_FEATURE(dc, GL);
4424 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4425 offsetof(CPUSPARCState, gl));
4426 break;
4427 case 26: // UA2005 strand status
4428 CHECK_IU_FEATURE(dc, HYPV);
4429 if (!hypervisor(dc))
4430 goto priv_insn;
4431 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4432 break;
4433 default:
4434 goto illegal_insn;
4436 #else
4437 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4438 if (dc->def->nwindows != 32) {
4439 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4440 (1 << dc->def->nwindows) - 1);
4442 #endif
4444 break;
4445 case 0x33: /* wrtbr, UA2005 wrhpr */
4447 #ifndef TARGET_SPARC64
4448 if (!supervisor(dc))
4449 goto priv_insn;
4450 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4451 #else
4452 CHECK_IU_FEATURE(dc, HYPV);
4453 if (!hypervisor(dc))
4454 goto priv_insn;
4455 cpu_tmp0 = get_temp_tl(dc);
4456 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4457 switch (rd) {
4458 case 0: // hpstate
4459 // XXX gen_op_wrhpstate();
4460 save_state(dc);
4461 gen_op_next_insn();
4462 tcg_gen_exit_tb(0);
4463 dc->is_br = 1;
4464 break;
4465 case 1: // htstate
4466 // XXX gen_op_wrhtstate();
4467 break;
4468 case 3: // hintp
4469 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4470 break;
4471 case 5: // htba
4472 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4473 break;
4474 case 31: // hstick_cmpr
4476 TCGv_ptr r_tickptr;
4478 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4479 r_tickptr = tcg_temp_new_ptr();
4480 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4481 offsetof(CPUSPARCState, hstick));
4482 gen_helper_tick_set_limit(r_tickptr,
4483 cpu_hstick_cmpr);
4484 tcg_temp_free_ptr(r_tickptr);
4486 break;
4487 case 6: // hver readonly
4488 default:
4489 goto illegal_insn;
4491 #endif
4493 break;
4494 #endif
4495 #ifdef TARGET_SPARC64
4496 case 0x2c: /* V9 movcc */
4498 int cc = GET_FIELD_SP(insn, 11, 12);
4499 int cond = GET_FIELD_SP(insn, 14, 17);
4500 DisasCompare cmp;
4501 TCGv dst;
4503 if (insn & (1 << 18)) {
4504 if (cc == 0) {
4505 gen_compare(&cmp, 0, cond, dc);
4506 } else if (cc == 2) {
4507 gen_compare(&cmp, 1, cond, dc);
4508 } else {
4509 goto illegal_insn;
4510 }
4511 } else {
4512 gen_fcompare(&cmp, cc, cond);
4513 }
4515 /* The get_src2 above loaded the normal 13-bit
4516 immediate field, not the 11-bit field we have
4517 in movcc. But it did handle the reg case. */
4518 if (IS_IMM) {
4519 simm = GET_FIELD_SPs(insn, 0, 10);
4520 tcg_gen_movi_tl(cpu_src2, simm);
4521 }
4523 dst = gen_load_gpr(dc, rd);
4524 tcg_gen_movcond_tl(cmp.cond, dst,
4525 cmp.c1, cmp.c2,
4526 cpu_src2, dst);
4527 free_compare(&cmp);
4528 gen_store_gpr(dc, rd, dst);
4529 break;
4530 }
4531 case 0x2d: /* V9 sdivx */
4532 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4533 gen_store_gpr(dc, rd, cpu_dst);
4534 break;
4535 case 0x2e: /* V9 popc */
4536 gen_helper_popc(cpu_dst, cpu_src2);
4537 gen_store_gpr(dc, rd, cpu_dst);
4538 break;
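/* In the V9 encoding popc takes only the second source operand,
   which is why cpu_src1 is unused here. */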
4539 case 0x2f: /* V9 movr */
4540 {
4541 int cond = GET_FIELD_SP(insn, 10, 12);
4542 DisasCompare cmp;
4543 TCGv dst;
4545 gen_compare_reg(&cmp, cond, cpu_src1);
4547 /* The get_src2 above loaded the normal 13-bit
4548 immediate field, not the 10-bit field we have
4549 in movr. But it did handle the reg case. */
4550 if (IS_IMM) {
4551 simm = GET_FIELD_SPs(insn, 0, 9);
4552 tcg_gen_movi_tl(cpu_src2, simm);
4553 }
4555 dst = gen_load_gpr(dc, rd);
4556 tcg_gen_movcond_tl(cmp.cond, dst,
4557 cmp.c1, cmp.c2,
4558 cpu_src2, dst);
4559 free_compare(&cmp);
4560 gen_store_gpr(dc, rd, dst);
4561 break;
4562 }
4563 #endif
4564 default:
4565 goto illegal_insn;
4566 }
4568 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4569 #ifdef TARGET_SPARC64
4570 int opf = GET_FIELD_SP(insn, 5, 13);
4571 rs1 = GET_FIELD(insn, 13, 17);
4572 rs2 = GET_FIELD(insn, 27, 31);
4573 if (gen_trap_ifnofpu(dc)) {
4574 goto jmp_insn;
4575 }
4577 switch (opf) {
4578 case 0x000: /* VIS I edge8cc */
4579 CHECK_FPU_FEATURE(dc, VIS1);
4580 cpu_src1 = gen_load_gpr(dc, rs1);
4581 cpu_src2 = gen_load_gpr(dc, rs2);
4582 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4583 gen_store_gpr(dc, rd, cpu_dst);
4584 break;
4585 case 0x001: /* VIS II edge8n */
4586 CHECK_FPU_FEATURE(dc, VIS2);
4587 cpu_src1 = gen_load_gpr(dc, rs1);
4588 cpu_src2 = gen_load_gpr(dc, rs2);
4589 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4590 gen_store_gpr(dc, rd, cpu_dst);
4591 break;
4592 case 0x002: /* VIS I edge8lcc */
4593 CHECK_FPU_FEATURE(dc, VIS1);
4594 cpu_src1 = gen_load_gpr(dc, rs1);
4595 cpu_src2 = gen_load_gpr(dc, rs2);
4596 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4597 gen_store_gpr(dc, rd, cpu_dst);
4598 break;
4599 case 0x003: /* VIS II edge8ln */
4600 CHECK_FPU_FEATURE(dc, VIS2);
4601 cpu_src1 = gen_load_gpr(dc, rs1);
4602 cpu_src2 = gen_load_gpr(dc, rs2);
4603 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4604 gen_store_gpr(dc, rd, cpu_dst);
4605 break;
4606 case 0x004: /* VIS I edge16cc */
4607 CHECK_FPU_FEATURE(dc, VIS1);
4608 cpu_src1 = gen_load_gpr(dc, rs1);
4609 cpu_src2 = gen_load_gpr(dc, rs2);
4610 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4611 gen_store_gpr(dc, rd, cpu_dst);
4612 break;
4613 case 0x005: /* VIS II edge16n */
4614 CHECK_FPU_FEATURE(dc, VIS2);
4615 cpu_src1 = gen_load_gpr(dc, rs1);
4616 cpu_src2 = gen_load_gpr(dc, rs2);
4617 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4618 gen_store_gpr(dc, rd, cpu_dst);
4619 break;
4620 case 0x006: /* VIS I edge16lcc */
4621 CHECK_FPU_FEATURE(dc, VIS1);
4622 cpu_src1 = gen_load_gpr(dc, rs1);
4623 cpu_src2 = gen_load_gpr(dc, rs2);
4624 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4625 gen_store_gpr(dc, rd, cpu_dst);
4626 break;
4627 case 0x007: /* VIS II edge16ln */
4628 CHECK_FPU_FEATURE(dc, VIS2);
4629 cpu_src1 = gen_load_gpr(dc, rs1);
4630 cpu_src2 = gen_load_gpr(dc, rs2);
4631 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4632 gen_store_gpr(dc, rd, cpu_dst);
4633 break;
4634 case 0x008: /* VIS I edge32cc */
4635 CHECK_FPU_FEATURE(dc, VIS1);
4636 cpu_src1 = gen_load_gpr(dc, rs1);
4637 cpu_src2 = gen_load_gpr(dc, rs2);
4638 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4639 gen_store_gpr(dc, rd, cpu_dst);
4640 break;
4641 case 0x009: /* VIS II edge32n */
4642 CHECK_FPU_FEATURE(dc, VIS2);
4643 cpu_src1 = gen_load_gpr(dc, rs1);
4644 cpu_src2 = gen_load_gpr(dc, rs2);
4645 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4646 gen_store_gpr(dc, rd, cpu_dst);
4647 break;
4648 case 0x00a: /* VIS I edge32lcc */
4649 CHECK_FPU_FEATURE(dc, VIS1);
4650 cpu_src1 = gen_load_gpr(dc, rs1);
4651 cpu_src2 = gen_load_gpr(dc, rs2);
4652 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4653 gen_store_gpr(dc, rd, cpu_dst);
4654 break;
4655 case 0x00b: /* VIS II edge32ln */
4656 CHECK_FPU_FEATURE(dc, VIS2);
4657 cpu_src1 = gen_load_gpr(dc, rs1);
4658 cpu_src2 = gen_load_gpr(dc, rs2);
4659 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4660 gen_store_gpr(dc, rd, cpu_dst);
4661 break;
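/* For the edge ops above, gen_edge's trailing arguments are the
   element width in bits, whether the insn sets condition codes
   (the *cc forms), and whether the mask is little-endian (*ln forms). */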
4662 case 0x010: /* VIS I array8 */
4663 CHECK_FPU_FEATURE(dc, VIS1);
4664 cpu_src1 = gen_load_gpr(dc, rs1);
4665 cpu_src2 = gen_load_gpr(dc, rs2);
4666 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4667 gen_store_gpr(dc, rd, cpu_dst);
4668 break;
4669 case 0x012: /* VIS I array16 */
4670 CHECK_FPU_FEATURE(dc, VIS1);
4671 cpu_src1 = gen_load_gpr(dc, rs1);
4672 cpu_src2 = gen_load_gpr(dc, rs2);
4673 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4674 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4675 gen_store_gpr(dc, rd, cpu_dst);
4676 break;
4677 case 0x014: /* VIS I array32 */
4678 CHECK_FPU_FEATURE(dc, VIS1);
4679 cpu_src1 = gen_load_gpr(dc, rs1);
4680 cpu_src2 = gen_load_gpr(dc, rs2);
4681 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4682 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4683 gen_store_gpr(dc, rd, cpu_dst);
4684 break;
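/* array16/array32 reuse the array8 helper and simply scale the
   resulting blocked address by the element size (<< 1 or << 2). */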
4685 case 0x018: /* VIS I alignaddr */
4686 CHECK_FPU_FEATURE(dc, VIS1);
4687 cpu_src1 = gen_load_gpr(dc, rs1);
4688 cpu_src2 = gen_load_gpr(dc, rs2);
4689 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4690 gen_store_gpr(dc, rd, cpu_dst);
4691 break;
4692 case 0x01a: /* VIS I alignaddrl */
4693 CHECK_FPU_FEATURE(dc, VIS1);
4694 cpu_src1 = gen_load_gpr(dc, rs1);
4695 cpu_src2 = gen_load_gpr(dc, rs2);
4696 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4697 gen_store_gpr(dc, rd, cpu_dst);
4698 break;
4699 case 0x019: /* VIS II bmask */
4700 CHECK_FPU_FEATURE(dc, VIS2);
4701 cpu_src1 = gen_load_gpr(dc, rs1);
4702 cpu_src2 = gen_load_gpr(dc, rs2);
4703 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4704 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4705 gen_store_gpr(dc, rd, cpu_dst);
4706 break;
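/* bmask also deposits the sum into the top 32 bits of %gsr, where a
   later bshuffle will pick up its byte-selector mask. */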
4707 case 0x020: /* VIS I fcmple16 */
4708 CHECK_FPU_FEATURE(dc, VIS1);
4709 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4710 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4711 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4712 gen_store_gpr(dc, rd, cpu_dst);
4713 break;
4714 case 0x022: /* VIS I fcmpne16 */
4715 CHECK_FPU_FEATURE(dc, VIS1);
4716 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4717 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4718 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4719 gen_store_gpr(dc, rd, cpu_dst);
4720 break;
4721 case 0x024: /* VIS I fcmple32 */
4722 CHECK_FPU_FEATURE(dc, VIS1);
4723 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4724 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4725 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4726 gen_store_gpr(dc, rd, cpu_dst);
4727 break;
4728 case 0x026: /* VIS I fcmpne32 */
4729 CHECK_FPU_FEATURE(dc, VIS1);
4730 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4731 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4732 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4733 gen_store_gpr(dc, rd, cpu_dst);
4734 break;
4735 case 0x028: /* VIS I fcmpgt16 */
4736 CHECK_FPU_FEATURE(dc, VIS1);
4737 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4738 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4739 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4740 gen_store_gpr(dc, rd, cpu_dst);
4741 break;
4742 case 0x02a: /* VIS I fcmpeq16 */
4743 CHECK_FPU_FEATURE(dc, VIS1);
4744 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4745 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4746 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4747 gen_store_gpr(dc, rd, cpu_dst);
4748 break;
4749 case 0x02c: /* VIS I fcmpgt32 */
4750 CHECK_FPU_FEATURE(dc, VIS1);
4751 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4752 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4753 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4754 gen_store_gpr(dc, rd, cpu_dst);
4755 break;
4756 case 0x02e: /* VIS I fcmpeq32 */
4757 CHECK_FPU_FEATURE(dc, VIS1);
4758 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4759 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4760 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4761 gen_store_gpr(dc, rd, cpu_dst);
4762 break;
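/* Unlike the FCC-based float compares, the VIS fcmp* ops above return
   a per-element result bitmask in an integer destination register. */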
4763 case 0x031: /* VIS I fmul8x16 */
4764 CHECK_FPU_FEATURE(dc, VIS1);
4765 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4766 break;
4767 case 0x033: /* VIS I fmul8x16au */
4768 CHECK_FPU_FEATURE(dc, VIS1);
4769 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4770 break;
4771 case 0x035: /* VIS I fmul8x16al */
4772 CHECK_FPU_FEATURE(dc, VIS1);
4773 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4774 break;
4775 case 0x036: /* VIS I fmul8sux16 */
4776 CHECK_FPU_FEATURE(dc, VIS1);
4777 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4778 break;
4779 case 0x037: /* VIS I fmul8ulx16 */
4780 CHECK_FPU_FEATURE(dc, VIS1);
4781 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4782 break;
4783 case 0x038: /* VIS I fmuld8sux16 */
4784 CHECK_FPU_FEATURE(dc, VIS1);
4785 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4786 break;
4787 case 0x039: /* VIS I fmuld8ulx16 */
4788 CHECK_FPU_FEATURE(dc, VIS1);
4789 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4790 break;
4791 case 0x03a: /* VIS I fpack32 */
4792 CHECK_FPU_FEATURE(dc, VIS1);
4793 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4794 break;
4795 case 0x03b: /* VIS I fpack16 */
4796 CHECK_FPU_FEATURE(dc, VIS1);
4797 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4798 cpu_dst_32 = gen_dest_fpr_F(dc);
4799 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4800 gen_store_fpr_F(dc, rd, cpu_dst_32);
4801 break;
4802 case 0x03d: /* VIS I fpackfix */
4803 CHECK_FPU_FEATURE(dc, VIS1);
4804 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4805 cpu_dst_32 = gen_dest_fpr_F(dc);
4806 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4807 gen_store_fpr_F(dc, rd, cpu_dst_32);
4808 break;
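/* fpack16/fpackfix take cpu_gsr because the pack operations consume
   the GSR scale factor; fpack32 goes through gen_gsr_fop_DDD for the
   same reason. */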
4809 case 0x03e: /* VIS I pdist */
4810 CHECK_FPU_FEATURE(dc, VIS1);
4811 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4812 break;
4813 case 0x048: /* VIS I faligndata */
4814 CHECK_FPU_FEATURE(dc, VIS1);
4815 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4816 break;
4817 case 0x04b: /* VIS I fpmerge */
4818 CHECK_FPU_FEATURE(dc, VIS1);
4819 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4820 break;
4821 case 0x04c: /* VIS II bshuffle */
4822 CHECK_FPU_FEATURE(dc, VIS2);
4823 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4824 break;
4825 case 0x04d: /* VIS I fexpand */
4826 CHECK_FPU_FEATURE(dc, VIS1);
4827 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4828 break;
4829 case 0x050: /* VIS I fpadd16 */
4830 CHECK_FPU_FEATURE(dc, VIS1);
4831 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4832 break;
4833 case 0x051: /* VIS I fpadd16s */
4834 CHECK_FPU_FEATURE(dc, VIS1);
4835 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4836 break;
4837 case 0x052: /* VIS I fpadd32 */
4838 CHECK_FPU_FEATURE(dc, VIS1);
4839 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4840 break;
4841 case 0x053: /* VIS I fpadd32s */
4842 CHECK_FPU_FEATURE(dc, VIS1);
4843 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4844 break;
4845 case 0x054: /* VIS I fpsub16 */
4846 CHECK_FPU_FEATURE(dc, VIS1);
4847 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4848 break;
4849 case 0x055: /* VIS I fpsub16s */
4850 CHECK_FPU_FEATURE(dc, VIS1);
4851 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4852 break;
4853 case 0x056: /* VIS I fpsub32 */
4854 CHECK_FPU_FEATURE(dc, VIS1);
4855 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4856 break;
4857 case 0x057: /* VIS I fpsub32s */
4858 CHECK_FPU_FEATURE(dc, VIS1);
4859 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4860 break;
4861 case 0x060: /* VIS I fzero */
4862 CHECK_FPU_FEATURE(dc, VIS1);
4863 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4864 tcg_gen_movi_i64(cpu_dst_64, 0);
4865 gen_store_fpr_D(dc, rd, cpu_dst_64);
4866 break;
4867 case 0x061: /* VIS I fzeros */
4868 CHECK_FPU_FEATURE(dc, VIS1);
4869 cpu_dst_32 = gen_dest_fpr_F(dc);
4870 tcg_gen_movi_i32(cpu_dst_32, 0);
4871 gen_store_fpr_F(dc, rd, cpu_dst_32);
4872 break;
4873 case 0x062: /* VIS I fnor */
4874 CHECK_FPU_FEATURE(dc, VIS1);
4875 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4876 break;
4877 case 0x063: /* VIS I fnors */
4878 CHECK_FPU_FEATURE(dc, VIS1);
4879 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4880 break;
4881 case 0x064: /* VIS I fandnot2 */
4882 CHECK_FPU_FEATURE(dc, VIS1);
4883 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4884 break;
4885 case 0x065: /* VIS I fandnot2s */
4886 CHECK_FPU_FEATURE(dc, VIS1);
4887 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4888 break;
4889 case 0x066: /* VIS I fnot2 */
4890 CHECK_FPU_FEATURE(dc, VIS1);
4891 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4892 break;
4893 case 0x067: /* VIS I fnot2s */
4894 CHECK_FPU_FEATURE(dc, VIS1);
4895 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4896 break;
4897 case 0x068: /* VIS I fandnot1 */
4898 CHECK_FPU_FEATURE(dc, VIS1);
4899 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4900 break;
4901 case 0x069: /* VIS I fandnot1s */
4902 CHECK_FPU_FEATURE(dc, VIS1);
4903 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4904 break;
4905 case 0x06a: /* VIS I fnot1 */
4906 CHECK_FPU_FEATURE(dc, VIS1);
4907 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4908 break;
4909 case 0x06b: /* VIS I fnot1s */
4910 CHECK_FPU_FEATURE(dc, VIS1);
4911 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4912 break;
4913 case 0x06c: /* VIS I fxor */
4914 CHECK_FPU_FEATURE(dc, VIS1);
4915 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4916 break;
4917 case 0x06d: /* VIS I fxors */
4918 CHECK_FPU_FEATURE(dc, VIS1);
4919 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4920 break;
4921 case 0x06e: /* VIS I fnand */
4922 CHECK_FPU_FEATURE(dc, VIS1);
4923 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4924 break;
4925 case 0x06f: /* VIS I fnands */
4926 CHECK_FPU_FEATURE(dc, VIS1);
4927 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4928 break;
4929 case 0x070: /* VIS I fand */
4930 CHECK_FPU_FEATURE(dc, VIS1);
4931 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4932 break;
4933 case 0x071: /* VIS I fands */
4934 CHECK_FPU_FEATURE(dc, VIS1);
4935 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4936 break;
4937 case 0x072: /* VIS I fxnor */
4938 CHECK_FPU_FEATURE(dc, VIS1);
4939 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4940 break;
4941 case 0x073: /* VIS I fxnors */
4942 CHECK_FPU_FEATURE(dc, VIS1);
4943 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4944 break;
4945 case 0x074: /* VIS I fsrc1 */
4946 CHECK_FPU_FEATURE(dc, VIS1);
4947 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4948 gen_store_fpr_D(dc, rd, cpu_src1_64);
4949 break;
4950 case 0x075: /* VIS I fsrc1s */
4951 CHECK_FPU_FEATURE(dc, VIS1);
4952 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4953 gen_store_fpr_F(dc, rd, cpu_src1_32);
4954 break;
4955 case 0x076: /* VIS I fornot2 */
4956 CHECK_FPU_FEATURE(dc, VIS1);
4957 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4958 break;
4959 case 0x077: /* VIS I fornot2s */
4960 CHECK_FPU_FEATURE(dc, VIS1);
4961 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4962 break;
4963 case 0x078: /* VIS I fsrc2 */
4964 CHECK_FPU_FEATURE(dc, VIS1);
4965 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4966 gen_store_fpr_D(dc, rd, cpu_src1_64);
4967 break;
4968 case 0x079: /* VIS I fsrc2s */
4969 CHECK_FPU_FEATURE(dc, VIS1);
4970 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4971 gen_store_fpr_F(dc, rd, cpu_src1_32);
4972 break;
4973 case 0x07a: /* VIS I fornot1 */
4974 CHECK_FPU_FEATURE(dc, VIS1);
4975 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4976 break;
4977 case 0x07b: /* VIS I fornot1s */
4978 CHECK_FPU_FEATURE(dc, VIS1);
4979 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4980 break;
4981 case 0x07c: /* VIS I for */
4982 CHECK_FPU_FEATURE(dc, VIS1);
4983 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4984 break;
4985 case 0x07d: /* VIS I fors */
4986 CHECK_FPU_FEATURE(dc, VIS1);
4987 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4988 break;
4989 case 0x07e: /* VIS I fone */
4990 CHECK_FPU_FEATURE(dc, VIS1);
4991 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4992 tcg_gen_movi_i64(cpu_dst_64, -1);
4993 gen_store_fpr_D(dc, rd, cpu_dst_64);
4994 break;
4995 case 0x07f: /* VIS I fones */
4996 CHECK_FPU_FEATURE(dc, VIS1);
4997 cpu_dst_32 = gen_dest_fpr_F(dc);
4998 tcg_gen_movi_i32(cpu_dst_32, -1);
4999 gen_store_fpr_F(dc, rd, cpu_dst_32);
5000 break;
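/* The fzero/fone constants and the logical ops in this group need no
   helpers: they map directly onto TCG moves and bitwise operations on
   the 64-bit (or 32-bit for the *s forms) FP registers. */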
5001 case 0x080: /* VIS I shutdown */
5002 case 0x081: /* VIS II siam */
5003 // XXX
5004 goto illegal_insn;
5005 default:
5006 goto illegal_insn;
5007 }
5008 #else
5009 goto ncp_insn;
5010 #endif
5011 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5012 #ifdef TARGET_SPARC64
5013 goto illegal_insn;
5014 #else
5015 goto ncp_insn;
5016 #endif
5017 #ifdef TARGET_SPARC64
5018 } else if (xop == 0x39) { /* V9 return */
5019 save_state(dc);
5020 cpu_src1 = get_src1(dc, insn);
5021 cpu_tmp0 = get_temp_tl(dc);
5022 if (IS_IMM) { /* immediate */
5023 simm = GET_FIELDs(insn, 19, 31);
5024 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5025 } else { /* register */
5026 rs2 = GET_FIELD(insn, 27, 31);
5027 if (rs2) {
5028 cpu_src2 = gen_load_gpr(dc, rs2);
5029 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5030 } else {
5031 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5032 }
5034 gen_helper_restore(cpu_env);
5035 gen_mov_pc_npc(dc);
5036 gen_check_align(cpu_tmp0, 3);
5037 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5038 dc->npc = DYNAMIC_PC;
5039 goto jmp_insn;
5040 #endif
5041 } else {
5042 cpu_src1 = get_src1(dc, insn);
5043 cpu_tmp0 = get_temp_tl(dc);
5044 if (IS_IMM) { /* immediate */
5045 simm = GET_FIELDs(insn, 19, 31);
5046 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5047 } else { /* register */
5048 rs2 = GET_FIELD(insn, 27, 31);
5049 if (rs2) {
5050 cpu_src2 = gen_load_gpr(dc, rs2);
5051 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5052 } else {
5053 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5054 }
5056 switch (xop) {
5057 case 0x38: /* jmpl */
5058 {
5059 TCGv t = gen_dest_gpr(dc, rd);
5060 tcg_gen_movi_tl(t, dc->pc);
5061 gen_store_gpr(dc, rd, t);
5063 gen_mov_pc_npc(dc);
5064 gen_check_align(cpu_tmp0, 3);
5065 gen_address_mask(dc, cpu_tmp0);
5066 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5067 dc->npc = DYNAMIC_PC;
5068 }
5069 goto jmp_insn;
5070 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5071 case 0x39: /* rett, V9 return */
5072 {
5073 if (!supervisor(dc))
5074 goto priv_insn;
5075 gen_mov_pc_npc(dc);
5076 gen_check_align(cpu_tmp0, 3);
5077 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5078 dc->npc = DYNAMIC_PC;
5079 gen_helper_rett(cpu_env);
5080 }
5081 goto jmp_insn;
5082 #endif
5083 case 0x3b: /* flush */
5084 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5085 goto unimp_flush;
5086 /* nop */
5087 break;
5088 case 0x3c: /* save */
5089 gen_helper_save(cpu_env);
5090 gen_store_gpr(dc, rd, cpu_tmp0);
5091 break;
5092 case 0x3d: /* restore */
5093 gen_helper_restore(cpu_env);
5094 gen_store_gpr(dc, rd, cpu_tmp0);
5095 break;
5096 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5097 case 0x3e: /* V9 done/retry */
5098 {
5099 switch (rd) {
5100 case 0:
5101 if (!supervisor(dc))
5102 goto priv_insn;
5103 dc->npc = DYNAMIC_PC;
5104 dc->pc = DYNAMIC_PC;
5105 gen_helper_done(cpu_env);
5106 goto jmp_insn;
5107 case 1:
5108 if (!supervisor(dc))
5109 goto priv_insn;
5110 dc->npc = DYNAMIC_PC;
5111 dc->pc = DYNAMIC_PC;
5112 gen_helper_retry(cpu_env);
5113 goto jmp_insn;
5114 default:
5115 goto illegal_insn;
5116 }
5117 }
5118 break;
5119 #endif
5120 default:
5121 goto illegal_insn;
5122 }
5123 }
5124 break;
5125 }
5126 break;
5127 case 3: /* load/store instructions */
5128 {
5129 unsigned int xop = GET_FIELD(insn, 7, 12);
5130 /* ??? gen_address_mask prevents us from using a source
5131 register directly. Always generate a temporary. */
5132 TCGv cpu_addr = get_temp_tl(dc);
5134 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5135 if (xop == 0x3c || xop == 0x3e) {
5136 /* V9 casa/casxa : no offset */
5137 } else if (IS_IMM) { /* immediate */
5138 simm = GET_FIELDs(insn, 19, 31);
5139 if (simm != 0) {
5140 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5141 }
5142 } else { /* register */
5143 rs2 = GET_FIELD(insn, 27, 31);
5144 if (rs2 != 0) {
5145 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5146 }
5147 }
5148 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5149 (xop > 0x17 && xop <= 0x1d ) ||
5150 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5151 TCGv cpu_val = gen_dest_gpr(dc, rd);
5153 switch (xop) {
5154 case 0x0: /* ld, V9 lduw, load unsigned word */
5155 gen_address_mask(dc, cpu_addr);
5156 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
5157 break;
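/* gen_address_mask zero-extends the address to 32 bits when V9
   address masking is in effect (dc->address_mask_32bit); it precedes
   each plain load/store in this block. */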
5158 case 0x1: /* ldub, load unsigned byte */
5159 gen_address_mask(dc, cpu_addr);
5160 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
5161 break;
5162 case 0x2: /* lduh, load unsigned halfword */
5163 gen_address_mask(dc, cpu_addr);
5164 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
5165 break;
5166 case 0x3: /* ldd, load double word */
5167 if (rd & 1)
5168 goto illegal_insn;
5169 else {
5170 TCGv_i64 t64;
5172 gen_address_mask(dc, cpu_addr);
5173 t64 = tcg_temp_new_i64();
5174 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
5175 tcg_gen_trunc_i64_tl(cpu_val, t64);
5176 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5177 gen_store_gpr(dc, rd + 1, cpu_val);
5178 tcg_gen_shri_i64(t64, t64, 32);
5179 tcg_gen_trunc_i64_tl(cpu_val, t64);
5180 tcg_temp_free_i64(t64);
5181 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5182 }
5183 break;
5184 case 0x9: /* ldsb, load signed byte */
5185 gen_address_mask(dc, cpu_addr);
5186 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
5187 break;
5188 case 0xa: /* ldsh, load signed halfword */
5189 gen_address_mask(dc, cpu_addr);
5190 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
5191 break;
5192 case 0xd: /* ldstub -- XXX: should be done atomically */
5193 {
5194 TCGv r_const;
5195 TCGv tmp = tcg_temp_new();
5197 gen_address_mask(dc, cpu_addr);
5198 tcg_gen_qemu_ld8u(tmp, cpu_addr, dc->mem_idx);
5199 r_const = tcg_const_tl(0xff);
5200 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
5201 tcg_gen_mov_tl(cpu_val, tmp);
5202 tcg_temp_free(r_const);
5203 tcg_temp_free(tmp);
5204 }
5205 break;
5206 case 0x0f:
5207 /* swap, swap register with memory. Also atomic. */
5208 CHECK_IU_FEATURE(dc, SWAP);
5209 cpu_src1 = gen_load_gpr(dc, rd);
5210 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5211 dc->mem_idx, MO_TEUL);
5212 break;
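/* MO_TEUL is a target-endian 32-bit access; in this version gen_swap
   presumably still emits a separate load and store rather than an
   atomic exchange, matching the ldstub caveat above. */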
5213 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5214 case 0x10: /* lda, V9 lduwa, load word alternate */
5215 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5216 break;
5217 case 0x11: /* lduba, load unsigned byte alternate */
5218 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5219 break;
5220 case 0x12: /* lduha, load unsigned halfword alternate */
5221 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5222 break;
5223 case 0x13: /* ldda, load double word alternate */
5224 if (rd & 1) {
5225 goto illegal_insn;
5226 }
5227 gen_ldda_asi(dc, cpu_addr, insn, rd);
5228 goto skip_move;
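/* Cases that jump to skip_move have already stored their results (or
   produce none), so they bypass the common gen_store_gpr below. */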
5229 case 0x19: /* ldsba, load signed byte alternate */
5230 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5231 break;
5232 case 0x1a: /* ldsha, load signed halfword alternate */
5233 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5234 break;
5235 case 0x1d: /* ldstuba -- XXX: should be done atomically */
5236 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5237 break;
5238 case 0x1f: /* swapa, swap reg with alt. memory. Also
5239 atomic. */
5240 CHECK_IU_FEATURE(dc, SWAP);
5241 cpu_src1 = gen_load_gpr(dc, rd);
5242 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5243 break;
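/* gen_swap_asi performs the same exchange through the operand ASI,
   resolving the ASI at translate time where possible so the common
   cases expand inline. */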
5245 #ifndef TARGET_SPARC64
5246 case 0x30: /* ldc */
5247 case 0x31: /* ldcsr */
5248 case 0x33: /* lddc */
5249 goto ncp_insn;
5250 #endif
5251 #endif
5252 #ifdef TARGET_SPARC64
5253 case 0x08: /* V9 ldsw */
5254 gen_address_mask(dc, cpu_addr);
5255 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
5256 break;
5257 case 0x0b: /* V9 ldx */
5258 gen_address_mask(dc, cpu_addr);
5259 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
5260 break;
5261 case 0x18: /* V9 ldswa */
5262 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5263 break;
5264 case 0x1b: /* V9 ldxa */
5265 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5266 break;
5267 case 0x2d: /* V9 prefetch, no effect */
5268 goto skip_move;
5269 case 0x30: /* V9 ldfa */
5270 if (gen_trap_ifnofpu(dc)) {
5271 goto jmp_insn;
5272 }
5273 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5274 gen_update_fprs_dirty(dc, rd);
5275 goto skip_move;
5276 case 0x33: /* V9 lddfa */
5277 if (gen_trap_ifnofpu(dc)) {
5278 goto jmp_insn;
5279 }
5280 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5281 gen_update_fprs_dirty(dc, DFPREG(rd));
5282 goto skip_move;
5283 case 0x3d: /* V9 prefetcha, no effect */
5284 goto skip_move;
5285 case 0x32: /* V9 ldqfa */
5286 CHECK_FPU_FEATURE(dc, FLOAT128);
5287 if (gen_trap_ifnofpu(dc)) {
5288 goto jmp_insn;
5289 }
5290 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5291 gen_update_fprs_dirty(dc, QFPREG(rd));
5292 goto skip_move;
5293 #endif
5294 default:
5295 goto illegal_insn;
5296 }
5297 gen_store_gpr(dc, rd, cpu_val);
5298 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5299 skip_move: ;
5300 #endif
5301 } else if (xop >= 0x20 && xop < 0x24) {
5302 TCGv t0;
5304 if (gen_trap_ifnofpu(dc)) {
5305 goto jmp_insn;
5306 }
5307 switch (xop) {
5308 case 0x20: /* ldf, load fpreg */
5309 gen_address_mask(dc, cpu_addr);
5310 t0 = get_temp_tl(dc);
5311 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
5312 cpu_dst_32 = gen_dest_fpr_F(dc);
5313 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
5314 gen_store_fpr_F(dc, rd, cpu_dst_32);
5315 break;
5316 case 0x21: /* ldfsr, V9 ldxfsr */
5317 #ifdef TARGET_SPARC64
5318 gen_address_mask(dc, cpu_addr);
5319 if (rd == 1) {
5320 TCGv_i64 t64 = tcg_temp_new_i64();
5321 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
5322 gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5323 tcg_temp_free_i64(t64);
5324 break;
5325 }
5326 #endif
5327 cpu_dst_32 = get_temp_i32(dc);
5328 t0 = get_temp_tl(dc);
5329 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
5330 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
5331 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5332 break;
5333 case 0x22: /* ldqf, load quad fpreg */
5334 {
5335 TCGv_i32 r_const;
5337 CHECK_FPU_FEATURE(dc, FLOAT128);
5338 r_const = tcg_const_i32(dc->mem_idx);
5339 gen_address_mask(dc, cpu_addr);
5340 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
5341 tcg_temp_free_i32(r_const);
5342 gen_op_store_QT0_fpr(QFPREG(rd));
5343 gen_update_fprs_dirty(dc, QFPREG(rd));
5344 }
5345 break;
5346 case 0x23: /* lddf, load double fpreg */
5347 gen_address_mask(dc, cpu_addr);
5348 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5349 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
5350 gen_store_fpr_D(dc, rd, cpu_dst_64);
5351 break;
5352 default:
5353 goto illegal_insn;
5354 }
5355 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5356 xop == 0xe || xop == 0x1e) {
5357 TCGv cpu_val = gen_load_gpr(dc, rd);
5359 switch (xop) {
5360 case 0x4: /* st, store word */
5361 gen_address_mask(dc, cpu_addr);
5362 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
5363 break;
5364 case 0x5: /* stb, store byte */
5365 gen_address_mask(dc, cpu_addr);
5366 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
5367 break;
5368 case 0x6: /* sth, store halfword */
5369 gen_address_mask(dc, cpu_addr);
5370 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
5371 break;
5372 case 0x7: /* std, store double word */
5373 if (rd & 1)
5374 goto illegal_insn;
5375 else {
5376 TCGv_i64 t64;
5377 TCGv lo;
5379 gen_address_mask(dc, cpu_addr);
5380 lo = gen_load_gpr(dc, rd + 1);
5381 t64 = tcg_temp_new_i64();
5382 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5383 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
5384 tcg_temp_free_i64(t64);
5385 }
5386 break;
5387 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5388 case 0x14: /* sta, V9 stwa, store word alternate */
5389 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5390 break;
5391 case 0x15: /* stba, store byte alternate */
5392 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5393 break;
5394 case 0x16: /* stha, store halfword alternate */
5395 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5396 break;
5397 case 0x17: /* stda, store double word alternate */
5398 if (rd & 1) {
5399 goto illegal_insn;
5401 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5402 break;
5403 #endif
5404 #ifdef TARGET_SPARC64
5405 case 0x0e: /* V9 stx */
5406 gen_address_mask(dc, cpu_addr);
5407 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5408 break;
5409 case 0x1e: /* V9 stxa */
5410 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5411 break;
5412 #endif
5413 default:
5414 goto illegal_insn;
5415 }
5416 } else if (xop > 0x23 && xop < 0x28) {
5417 if (gen_trap_ifnofpu(dc)) {
5418 goto jmp_insn;
5419 }
5420 switch (xop) {
5421 case 0x24: /* stf, store fpreg */
5422 {
5423 TCGv t = get_temp_tl(dc);
5424 gen_address_mask(dc, cpu_addr);
5425 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5426 tcg_gen_ext_i32_tl(t, cpu_src1_32);
5427 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5428 }
5429 break;
5430 case 0x25: /* stfsr, V9 stxfsr */
5431 {
5432 #ifdef TARGET_SPARC64
5433 gen_address_mask(dc, cpu_addr);
5434 if (rd == 1) {
5435 tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
5436 break;
5437 }
5438 #endif
5439 tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
5440 }
5441 break;
5442 case 0x26:
5443 #ifdef TARGET_SPARC64
5444 /* V9 stqf, store quad fpreg */
5445 {
5446 TCGv_i32 r_const;
5448 CHECK_FPU_FEATURE(dc, FLOAT128);
5449 gen_op_load_fpr_QT0(QFPREG(rd));
5450 r_const = tcg_const_i32(dc->mem_idx);
5451 gen_address_mask(dc, cpu_addr);
5452 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5453 tcg_temp_free_i32(r_const);
5454 }
5455 break;
5456 #else /* !TARGET_SPARC64 */
5457 /* stdfq, store floating point queue */
5458 #if defined(CONFIG_USER_ONLY)
5459 goto illegal_insn;
5460 #else
5461 if (!supervisor(dc))
5462 goto priv_insn;
5463 if (gen_trap_ifnofpu(dc)) {
5464 goto jmp_insn;
5465 }
5466 goto nfq_insn;
5467 #endif
5468 #endif
5469 case 0x27: /* stdf, store double fpreg */
5470 gen_address_mask(dc, cpu_addr);
5471 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5472 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5473 break;
5474 default:
5475 goto illegal_insn;
5476 }
5477 } else if (xop > 0x33 && xop < 0x3f) {
5478 switch (xop) {
5479 #ifdef TARGET_SPARC64
5480 case 0x34: /* V9 stfa */
5481 if (gen_trap_ifnofpu(dc)) {
5482 goto jmp_insn;
5483 }
5484 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5485 break;
5486 case 0x36: /* V9 stqfa */
5487 {
5488 CHECK_FPU_FEATURE(dc, FLOAT128);
5489 if (gen_trap_ifnofpu(dc)) {
5490 goto jmp_insn;
5491 }
5492 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5493 }
5494 break;
5495 case 0x37: /* V9 stdfa */
5496 if (gen_trap_ifnofpu(dc)) {
5497 goto jmp_insn;
5498 }
5499 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5500 break;
5501 case 0x3e: /* V9 casxa */
5502 rs2 = GET_FIELD(insn, 27, 31);
5503 cpu_src2 = gen_load_gpr(dc, rs2);
5504 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5505 break;
5506 #else
5507 case 0x34: /* stc */
5508 case 0x35: /* stcsr */
5509 case 0x36: /* stdcq */
5510 case 0x37: /* stdc */
5511 goto ncp_insn;
5512 #endif
5513 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5514 case 0x3c: /* V9 or LEON3 casa */
5515 #ifndef TARGET_SPARC64
5516 CHECK_IU_FEATURE(dc, CASA);
5517 #endif
5518 rs2 = GET_FIELD(insn, 27, 31);
5519 cpu_src2 = gen_load_gpr(dc, rs2);
5520 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5521 break;
5522 #endif
5523 default:
5524 goto illegal_insn;
5525 }
5526 } else {
5527 goto illegal_insn;
5528 }
5529 }
5530 break;
5531 }
5532 /* default case for non jump instructions */
5533 if (dc->npc == DYNAMIC_PC) {
5534 dc->pc = DYNAMIC_PC;
5535 gen_op_next_insn();
5536 } else if (dc->npc == JUMP_PC) {
5537 /* we can do a static jump */
5538 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5539 dc->is_br = 1;
5540 } else {
5541 dc->pc = dc->npc;
5542 dc->npc = dc->npc + 4;
5543 }
5544 jmp_insn:
5545 goto egress;
5546 illegal_insn:
5547 gen_exception(dc, TT_ILL_INSN);
5548 goto egress;
5549 unimp_flush:
5550 gen_exception(dc, TT_UNIMP_FLUSH);
5551 goto egress;
5552 #if !defined(CONFIG_USER_ONLY)
5553 priv_insn:
5554 gen_exception(dc, TT_PRIV_INSN);
5555 goto egress;
5556 #endif
5557 nfpu_insn:
5558 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5559 goto egress;
5560 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5561 nfq_insn:
5562 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5563 goto egress;
5564 #endif
5565 #ifndef TARGET_SPARC64
5566 ncp_insn:
5567 gen_exception(dc, TT_NCP_INSN);
5568 goto egress;
5569 #endif
5570 egress:
5571 if (dc->n_t32 != 0) {
5572 int i;
5573 for (i = dc->n_t32 - 1; i >= 0; --i) {
5574 tcg_temp_free_i32(dc->t32[i]);
5575 }
5576 dc->n_t32 = 0;
5577 }
5578 if (dc->n_ttl != 0) {
5579 int i;
5580 for (i = dc->n_ttl - 1; i >= 0; --i) {
5581 tcg_temp_free(dc->ttl[i]);
5582 }
5583 dc->n_ttl = 0;
5584 }
5585 }
5587 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5588 {
5589 SPARCCPU *cpu = sparc_env_get_cpu(env);
5590 CPUState *cs = CPU(cpu);
5591 target_ulong pc_start, last_pc;
5592 DisasContext dc1, *dc = &dc1;
5593 int num_insns;
5594 int max_insns;
5595 unsigned int insn;
5597 memset(dc, 0, sizeof(DisasContext));
5598 dc->tb = tb;
5599 pc_start = tb->pc;
5600 dc->pc = pc_start;
5601 last_pc = dc->pc;
5602 dc->npc = (target_ulong) tb->cs_base;
5603 dc->cc_op = CC_OP_DYNAMIC;
5604 dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
5605 dc->def = env->def;
5606 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5607 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5608 dc->singlestep = (cs->singlestep_enabled || singlestep);
5609 #ifdef TARGET_SPARC64
5610 dc->fprs_dirty = 0;
5611 dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5612 #endif
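/* All translation-relevant CPU state comes from tb->flags here: the
   MMU index, FPU-enable and 32-bit address-mask bits, and on V9 the
   current default ASI byte. The generated code is only valid for
   this exact combination. */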
5614 num_insns = 0;
5615 max_insns = tb->cflags & CF_COUNT_MASK;
5616 if (max_insns == 0) {
5617 max_insns = CF_COUNT_MASK;
5618 }
5619 if (max_insns > TCG_MAX_INSNS) {
5620 max_insns = TCG_MAX_INSNS;
5621 }
5623 gen_tb_start(tb);
5624 do {
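/* Each insn records a (pc, npc) pair for restore_state_to_opc. A
   two-way conditional npc is encoded by or-ing the JUMP_PC marker
   into jump_pc[0]; jump_pc[1] must then be the fall-through pc + 4,
   as asserted below. */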
5625 if (dc->npc & JUMP_PC) {
5626 assert(dc->jump_pc[1] == dc->pc + 4);
5627 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5628 } else {
5629 tcg_gen_insn_start(dc->pc, dc->npc);
5630 }
5631 num_insns++;
5632 last_pc = dc->pc;
5634 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5635 if (dc->pc != pc_start) {
5636 save_state(dc);
5637 }
5638 gen_helper_debug(cpu_env);
5639 tcg_gen_exit_tb(0);
5640 dc->is_br = 1;
5641 goto exit_gen_loop;
5642 }
5644 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5645 gen_io_start();
5646 }
5648 insn = cpu_ldl_code(env, dc->pc);
5650 disas_sparc_insn(dc, insn);
5652 if (dc->is_br)
5653 break;
5654 /* if the next PC is different, we abort now */
5655 if (dc->pc != (last_pc + 4))
5656 break;
5657 /* if we reach a page boundary, we stop generation so that the
5658 PC of a TT_TFAULT exception is always in the right page */
5659 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5660 break;
5661 /* if single step mode, we generate only one instruction and
5662 generate an exception */
5663 if (dc->singlestep) {
5664 break;
5665 }
5666 } while (!tcg_op_buf_full() &&
5667 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5668 num_insns < max_insns);
5670 exit_gen_loop:
5671 if (tb->cflags & CF_LAST_IO) {
5672 gen_io_end();
5673 }
5674 if (!dc->is_br) {
5675 if (dc->pc != DYNAMIC_PC &&
5676 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5677 /* static PC and NPC: we can use direct chaining */
5678 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5679 } else {
5680 if (dc->pc != DYNAMIC_PC) {
5681 tcg_gen_movi_tl(cpu_pc, dc->pc);
5682 }
5683 save_npc(dc);
5684 tcg_gen_exit_tb(0);
5685 }
5686 }
5687 gen_tb_end(tb, num_insns);
5689 tb->size = last_pc + 4 - pc_start;
5690 tb->icount = num_insns;
5692 #ifdef DEBUG_DISAS
5693 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5694 && qemu_log_in_addr_range(pc_start)) {
5695 qemu_log("--------------\n");
5696 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5697 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5698 qemu_log("\n");
5699 }
5700 #endif
5701 }
5703 void gen_intermediate_code_init(CPUSPARCState *env)
5704 {
5705 static int inited;
5706 static const char gregnames[32][4] = {
5707 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5708 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5709 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5710 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5712 static const char fregnames[32][4] = {
5713 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5714 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5715 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5716 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5719 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5720 #ifdef TARGET_SPARC64
5721 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5722 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5723 #else
5724 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5725 #endif
5726 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5727 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5728 };
5730 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5731 #ifdef TARGET_SPARC64
5732 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5733 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5734 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5735 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5736 "hstick_cmpr" },
5737 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5738 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5739 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5740 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5741 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5742 #endif
5743 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5744 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5745 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5746 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5747 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5748 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5749 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5750 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5751 #ifndef CONFIG_USER_ONLY
5752 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5753 #endif
5754 };
5756 unsigned int i;
5758 /* init various static tables */
5759 if (inited) {
5760 return;
5761 }
5762 inited = 1;
5764 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5765 tcg_ctx.tcg_env = cpu_env;
5767 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5768 offsetof(CPUSPARCState, regwptr),
5769 "regwptr");
5771 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5772 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5773 }
5775 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5776 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5777 }
5779 TCGV_UNUSED(cpu_regs[0]);
5780 for (i = 1; i < 8; ++i) {
5781 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5782 offsetof(CPUSPARCState, gregs[i]),
5783 gregnames[i]);
5784 }
5786 for (i = 8; i < 32; ++i) {
5787 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5788 (i - 8) * sizeof(target_ulong),
5789 gregnames[i]);
5790 }
5792 for (i = 0; i < TARGET_DPREGS; i++) {
5793 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5794 offsetof(CPUSPARCState, fpr[i]),
5795 fregnames[i]);
5796 }
5797 }
5799 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5800 target_ulong *data)
5801 {
5802 target_ulong pc = data[0];
5803 target_ulong npc = data[1];
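/* data[] holds the (pc, npc) pair recorded by tcg_gen_insn_start;
   the low bits of npc distinguish a real next-PC value from the
   DYNAMIC_PC and JUMP_PC markers. */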
5805 env->pc = pc;
5806 if (npc == DYNAMIC_PC) {
5807 /* dynamic NPC: already stored */
5808 } else if (npc & JUMP_PC) {
5809 /* jump PC: use 'cond' and the jump targets of the translation */
5810 if (env->cond) {
5811 env->npc = npc & ~3;
5812 } else {
5813 env->npc = pc + 4;
5814 }
5815 } else {
5816 env->npc = npc;