target-sparc: Use MMU_PHYS_IDX for bypass asis
[qemu/ar7.git] / target-sparc / translate.c
blob 86432accc5429f8c4895db7c36f64c80ecb2443d
1 /*
2 SPARC translation
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "exec/cpu_ldst.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
33 #include "exec/log.h"
34 #include "asi.h"
37 #define DEBUG_DISAS
39 #define DYNAMIC_PC 1 /* dynamic pc value */
40 #define JUMP_PC 2 /* dynamic pc value which takes only two values
41 according to jump_pc[T2] */
43 /* global register indexes */
44 static TCGv_env cpu_env;
45 static TCGv_ptr cpu_regwptr;
46 static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
47 static TCGv_i32 cpu_cc_op;
48 static TCGv_i32 cpu_psr;
49 static TCGv cpu_fsr, cpu_pc, cpu_npc;
50 static TCGv cpu_regs[32];
51 static TCGv cpu_y;
52 #ifndef CONFIG_USER_ONLY
53 static TCGv cpu_tbr;
54 #endif
55 static TCGv cpu_cond;
56 #ifdef TARGET_SPARC64
57 static TCGv_i32 cpu_xcc, cpu_fprs;
58 static TCGv cpu_gsr;
59 static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
60 static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
61 #else
62 static TCGv cpu_wim;
63 #endif
64 /* Floating point registers */
65 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
67 #include "exec/gen-icount.h"
69 typedef struct DisasContext {
70 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
71 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
72     target_ulong jump_pc[2]; /* the two npc candidates when npc == JUMP_PC */
73 int is_br;
74 int mem_idx;
75 int fpu_enabled;
76 int address_mask_32bit;
77 int singlestep;
78 uint32_t cc_op; /* current CC operation */
79 struct TranslationBlock *tb;
80 sparc_def_t *def;
81 TCGv_i32 t32[3];
82 TCGv ttl[5];
83 int n_t32;
84 int n_ttl;
85 #ifdef TARGET_SPARC64
86 int fprs_dirty;
87 int asi;
88 #endif
89 } DisasContext;
91 typedef struct {
92 TCGCond cond;
93 bool is_bool;
94 bool g1, g2;
95 TCGv c1, c2;
96 } DisasCompare;
98 // This macro uses non-native bit numbering: bit 0 is the MSB (bit 31)
99 #define GET_FIELD(X, FROM, TO) \
100 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
102 // This macro uses the bit order of the manuals, i.e. bit 0 is 2^0 (the LSB)
103 #define GET_FIELD_SP(X, FROM, TO) \
104 GET_FIELD(X, 31 - (TO), 31 - (FROM))
106 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
107 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
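/* Illustrative worked example of the two numbering schemes: GET_FIELD
 * counts bit 0 as the MSB, GET_FIELD_SP counts from the LSB as the
 * manuals do.  For insn = 0x12800004 (a bne, cond = 0x9 in bits 28..25):
 *   GET_FIELD(insn, 3, 6)      == (insn >> 25) & 0xf == 0x9
 *   GET_FIELD_SP(insn, 25, 28) == the same four bits, LSB-relative
 */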
109 #ifdef TARGET_SPARC64
110 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
111 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
112 #else
113 #define DFPREG(r) (r & 0x1e)
114 #define QFPREG(r) (r & 0x1c)
115 #endif
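/* Illustrative: on SPARC64 the low bit of the 5-bit register field selects
 * the upper bank, so DFPREG(3) == 34 (%f34) while DFPREG(2) == 2 (%f2);
 * QFPREG additionally clears bit 1, e.g. QFPREG(3) == 32 (%f32). */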
117 #define UA2005_HTRAP_MASK 0xff
118 #define V8_TRAP_MASK 0x7f
120 static int sign_extend(int x, int len)
122 len = 32 - len;
123 return (x << len) >> len;
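/* Illustrative: sign_extend(0x1fff, 13) == -1 (bit 12, the sign bit of a
 * 13-bit field, is set), while sign_extend(0x0fff, 13) == 0xfff. */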
126 #define IS_IMM (insn & (1<<13))
128 static inline TCGv_i32 get_temp_i32(DisasContext *dc)
130 TCGv_i32 t;
131 assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
132 dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
133 return t;
136 static inline TCGv get_temp_tl(DisasContext *dc)
138 TCGv t;
139 assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
140 dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
141 return t;
144 static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
146 #if defined(TARGET_SPARC64)
147 int bit = (rd < 32) ? 1 : 2;
148 /* If we know we've already set this bit within the TB,
149 we can avoid setting it again. */
150 if (!(dc->fprs_dirty & bit)) {
151 dc->fprs_dirty |= bit;
152 tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
154 #endif
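/* The bit values used above map onto the V9 FPRS register layout:
 * 1 is FPRS.DL (lower half, %f0-%f31, dirty), 2 is FPRS.DU (upper half). */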
157 /* floating point registers moves */
158 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
160 #if TCG_TARGET_REG_BITS == 32
161 if (src & 1) {
162 return TCGV_LOW(cpu_fpr[src / 2]);
163 } else {
164 return TCGV_HIGH(cpu_fpr[src / 2]);
166 #else
167 if (src & 1) {
168 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
169 } else {
170 TCGv_i32 ret = get_temp_i32(dc);
171 TCGv_i64 t = tcg_temp_new_i64();
173 tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
174 tcg_gen_extrl_i64_i32(ret, t);
175 tcg_temp_free_i64(t);
177 return ret;
179 #endif
182 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
184 #if TCG_TARGET_REG_BITS == 32
185 if (dst & 1) {
186 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
187 } else {
188 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
190 #else
191 TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
192 tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
193 (dst & 1 ? 0 : 32), 32);
194 #endif
195 gen_update_fprs_dirty(dc, dst);
198 static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
200 return get_temp_i32(dc);
203 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
205 src = DFPREG(src);
206 return cpu_fpr[src / 2];
209 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
211 dst = DFPREG(dst);
212 tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
213 gen_update_fprs_dirty(dc, dst);
216 static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
218 return cpu_fpr[DFPREG(dst) / 2];
221 static void gen_op_load_fpr_QT0(unsigned int src)
223 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
224 offsetof(CPU_QuadU, ll.upper));
225 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
226 offsetof(CPU_QuadU, ll.lower));
229 static void gen_op_load_fpr_QT1(unsigned int src)
231 tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
232 offsetof(CPU_QuadU, ll.upper));
233 tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
234 offsetof(CPU_QuadU, ll.lower));
237 static void gen_op_store_QT0_fpr(unsigned int dst)
239 tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
240 offsetof(CPU_QuadU, ll.upper));
241 tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
242 offsetof(CPU_QuadU, ll.lower));
245 #ifdef TARGET_SPARC64
246 static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
248 rd = QFPREG(rd);
249 rs = QFPREG(rs);
251 tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
252 tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
253 gen_update_fprs_dirty(dc, rd);
255 #endif
257 /* moves */
258 #ifdef CONFIG_USER_ONLY
259 #define supervisor(dc) 0
260 #ifdef TARGET_SPARC64
261 #define hypervisor(dc) 0
262 #endif
263 #else
264 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
265 #ifdef TARGET_SPARC64
266 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
267 #else
268 #endif
269 #endif
271 #ifdef TARGET_SPARC64
272 #ifndef TARGET_ABI32
273 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
274 #else
275 #define AM_CHECK(dc) (1)
276 #endif
277 #endif
279 static inline void gen_address_mask(DisasContext *dc, TCGv addr)
281 #ifdef TARGET_SPARC64
282 if (AM_CHECK(dc))
283 tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
284 #endif
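/* Per the V9 spec, when PSTATE.AM is set (or for a 32-bit ABI guest, where
 * AM_CHECK is constant 1) the upper 32 address bits are ignored; the
 * 0xffffffff mask above implements exactly that. */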
287 static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
289 if (reg > 0) {
290 assert(reg < 32);
291 return cpu_regs[reg];
292 } else {
293 TCGv t = get_temp_tl(dc);
294 tcg_gen_movi_tl(t, 0);
295 return t;
299 static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
301 if (reg > 0) {
302 assert(reg < 32);
303 tcg_gen_mov_tl(cpu_regs[reg], v);
307 static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
309 if (reg > 0) {
310 assert(reg < 32);
311 return cpu_regs[reg];
312 } else {
313 return get_temp_tl(dc);
317 static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
318 target_ulong npc)
320 if (unlikely(s->singlestep)) {
321 return false;
324 #ifndef CONFIG_USER_ONLY
325 return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
326 (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
327 #else
328 return true;
329 #endif
332 static inline void gen_goto_tb(DisasContext *s, int tb_num,
333 target_ulong pc, target_ulong npc)
335 if (use_goto_tb(s, pc, npc)) {
336 /* jump to same page: we can use a direct jump */
337 tcg_gen_goto_tb(tb_num);
338 tcg_gen_movi_tl(cpu_pc, pc);
339 tcg_gen_movi_tl(cpu_npc, npc);
340 tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
341 } else {
342 /* jump to another page: currently not optimized */
343 tcg_gen_movi_tl(cpu_pc, pc);
344 tcg_gen_movi_tl(cpu_npc, npc);
345 tcg_gen_exit_tb(0);
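/* Note on the two exits above: tcg_gen_exit_tb((uintptr_t)tb + tb_num)
 * returns a tagged pointer identifying which jump slot of this TB to
 * patch, enabling direct block chaining; tcg_gen_exit_tb(0) simply
 * returns to the main loop, which re-resolves cpu_pc/cpu_npc. */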
349 // XXX suboptimal
350 static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
352 tcg_gen_extu_i32_tl(reg, src);
353 tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
354 tcg_gen_andi_tl(reg, reg, 0x1);
357 static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
359 tcg_gen_extu_i32_tl(reg, src);
360 tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
361 tcg_gen_andi_tl(reg, reg, 0x1);
364 static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
366 tcg_gen_extu_i32_tl(reg, src);
367 tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
368 tcg_gen_andi_tl(reg, reg, 0x1);
371 static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
373 tcg_gen_extu_i32_tl(reg, src);
374 tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
375 tcg_gen_andi_tl(reg, reg, 0x1);
378 static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
380 tcg_gen_mov_tl(cpu_cc_src, src1);
381 tcg_gen_mov_tl(cpu_cc_src2, src2);
382 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
383 tcg_gen_mov_tl(dst, cpu_cc_dst);
386 static TCGv_i32 gen_add32_carry32(void)
388 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
390 /* Carry is computed from a previous add: (dst < src) */
391 #if TARGET_LONG_BITS == 64
392 cc_src1_32 = tcg_temp_new_i32();
393 cc_src2_32 = tcg_temp_new_i32();
394 tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
395 tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
396 #else
397 cc_src1_32 = cpu_cc_dst;
398 cc_src2_32 = cpu_cc_src;
399 #endif
401 carry_32 = tcg_temp_new_i32();
402 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
404 #if TARGET_LONG_BITS == 64
405 tcg_temp_free_i32(cc_src1_32);
406 tcg_temp_free_i32(cc_src2_32);
407 #endif
409 return carry_32;
412 static TCGv_i32 gen_sub32_carry32(void)
414 TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
416 /* Carry is computed from a previous borrow: (src1 < src2) */
417 #if TARGET_LONG_BITS == 64
418 cc_src1_32 = tcg_temp_new_i32();
419 cc_src2_32 = tcg_temp_new_i32();
420 tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
421 tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
422 #else
423 cc_src1_32 = cpu_cc_src;
424 cc_src2_32 = cpu_cc_src2;
425 #endif
427 carry_32 = tcg_temp_new_i32();
428 tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
430 #if TARGET_LONG_BITS == 64
431 tcg_temp_free_i32(cc_src1_32);
432 tcg_temp_free_i32(cc_src2_32);
433 #endif
435 return carry_32;
438 static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
439 TCGv src2, int update_cc)
441 TCGv_i32 carry_32;
442 TCGv carry;
444 switch (dc->cc_op) {
445 case CC_OP_DIV:
446 case CC_OP_LOGIC:
447 /* Carry is known to be zero. Fall back to plain ADD. */
448 if (update_cc) {
449 gen_op_add_cc(dst, src1, src2);
450 } else {
451 tcg_gen_add_tl(dst, src1, src2);
453 return;
455 case CC_OP_ADD:
456 case CC_OP_TADD:
457 case CC_OP_TADDTV:
458 if (TARGET_LONG_BITS == 32) {
459 /* We can re-use the host's hardware carry generation by using
460 an ADD2 opcode. We discard the low part of the output.
461 Ideally we'd combine this operation with the add that
462 generated the carry in the first place. */
463 carry = tcg_temp_new();
464 tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
465 tcg_temp_free(carry);
466 goto add_done;
468 carry_32 = gen_add32_carry32();
469 break;
471 case CC_OP_SUB:
472 case CC_OP_TSUB:
473 case CC_OP_TSUBTV:
474 carry_32 = gen_sub32_carry32();
475 break;
477 default:
478 /* We need external help to produce the carry. */
479 carry_32 = tcg_temp_new_i32();
480 gen_helper_compute_C_icc(carry_32, cpu_env);
481 break;
484 #if TARGET_LONG_BITS == 64
485 carry = tcg_temp_new();
486 tcg_gen_extu_i32_i64(carry, carry_32);
487 #else
488 carry = carry_32;
489 #endif
491 tcg_gen_add_tl(dst, src1, src2);
492 tcg_gen_add_tl(dst, dst, carry);
494 tcg_temp_free_i32(carry_32);
495 #if TARGET_LONG_BITS == 64
496 tcg_temp_free(carry);
497 #endif
499 add_done:
500 if (update_cc) {
501 tcg_gen_mov_tl(cpu_cc_src, src1);
502 tcg_gen_mov_tl(cpu_cc_src2, src2);
503 tcg_gen_mov_tl(cpu_cc_dst, dst);
504 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
505 dc->cc_op = CC_OP_ADDX;
509 static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
511 tcg_gen_mov_tl(cpu_cc_src, src1);
512 tcg_gen_mov_tl(cpu_cc_src2, src2);
513 tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
514 tcg_gen_mov_tl(dst, cpu_cc_dst);
517 static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
518 TCGv src2, int update_cc)
520 TCGv_i32 carry_32;
521 TCGv carry;
523 switch (dc->cc_op) {
524 case CC_OP_DIV:
525 case CC_OP_LOGIC:
526 /* Carry is known to be zero. Fall back to plain SUB. */
527 if (update_cc) {
528 gen_op_sub_cc(dst, src1, src2);
529 } else {
530 tcg_gen_sub_tl(dst, src1, src2);
532 return;
534 case CC_OP_ADD:
535 case CC_OP_TADD:
536 case CC_OP_TADDTV:
537 carry_32 = gen_add32_carry32();
538 break;
540 case CC_OP_SUB:
541 case CC_OP_TSUB:
542 case CC_OP_TSUBTV:
543 if (TARGET_LONG_BITS == 32) {
544 /* We can re-use the host's hardware carry generation by using
545 a SUB2 opcode. We discard the low part of the output.
546                Ideally we'd combine this operation with the subtract that
547 generated the carry in the first place. */
548 carry = tcg_temp_new();
549 tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
550 tcg_temp_free(carry);
551 goto sub_done;
553 carry_32 = gen_sub32_carry32();
554 break;
556 default:
557 /* We need external help to produce the carry. */
558 carry_32 = tcg_temp_new_i32();
559 gen_helper_compute_C_icc(carry_32, cpu_env);
560 break;
563 #if TARGET_LONG_BITS == 64
564 carry = tcg_temp_new();
565 tcg_gen_extu_i32_i64(carry, carry_32);
566 #else
567 carry = carry_32;
568 #endif
570 tcg_gen_sub_tl(dst, src1, src2);
571 tcg_gen_sub_tl(dst, dst, carry);
573 tcg_temp_free_i32(carry_32);
574 #if TARGET_LONG_BITS == 64
575 tcg_temp_free(carry);
576 #endif
578 sub_done:
579 if (update_cc) {
580 tcg_gen_mov_tl(cpu_cc_src, src1);
581 tcg_gen_mov_tl(cpu_cc_src2, src2);
582 tcg_gen_mov_tl(cpu_cc_dst, dst);
583 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
584 dc->cc_op = CC_OP_SUBX;
588 static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
590 TCGv r_temp, zero, t0;
592 r_temp = tcg_temp_new();
593 t0 = tcg_temp_new();
595 /* old op:
596 if (!(env->y & 1))
597         T1 = 0;
598     */
599 zero = tcg_const_tl(0);
600 tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
601 tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
602 tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
603 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
604 zero, cpu_cc_src2);
605 tcg_temp_free(zero);
607 // b2 = T0 & 1;
608 // env->y = (b2 << 31) | (env->y >> 1);
609 tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
610 tcg_gen_shli_tl(r_temp, r_temp, 31);
611 tcg_gen_shri_tl(t0, cpu_y, 1);
612 tcg_gen_andi_tl(t0, t0, 0x7fffffff);
613 tcg_gen_or_tl(t0, t0, r_temp);
614 tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);
616 // b1 = N ^ V;
617 gen_mov_reg_N(t0, cpu_psr);
618 gen_mov_reg_V(r_temp, cpu_psr);
619 tcg_gen_xor_tl(t0, t0, r_temp);
620 tcg_temp_free(r_temp);
622 // T0 = (b1 << 31) | (T0 >> 1);
623 // src1 = T0;
624 tcg_gen_shli_tl(t0, t0, 31);
625 tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
626 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
627 tcg_temp_free(t0);
629 tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
631 tcg_gen_mov_tl(dst, cpu_cc_dst);
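/* Recap of the MULScc step implemented above: Y's lsb supplies the next
 * multiplier bit (src2 is forced to 0 when it is clear); Y is shifted
 * right with src1's low bit inserted at its top; N ^ V is shifted into
 * the top of cc_src; and the final add updates the icc flags. */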
634 static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
636 #if TARGET_LONG_BITS == 32
637 if (sign_ext) {
638 tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
639 } else {
640 tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
642 #else
643 TCGv t0 = tcg_temp_new_i64();
644 TCGv t1 = tcg_temp_new_i64();
646 if (sign_ext) {
647 tcg_gen_ext32s_i64(t0, src1);
648 tcg_gen_ext32s_i64(t1, src2);
649 } else {
650 tcg_gen_ext32u_i64(t0, src1);
651 tcg_gen_ext32u_i64(t1, src2);
654 tcg_gen_mul_i64(dst, t0, t1);
655 tcg_temp_free(t0);
656 tcg_temp_free(t1);
658 tcg_gen_shri_i64(cpu_y, dst, 32);
659 #endif
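/* In both branches the high 32 bits of the 64-bit product end up in %y,
 * as the V8 definition of UMUL/SMUL requires. */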
662 static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
664 /* zero-extend truncated operands before multiplication */
665 gen_op_multiply(dst, src1, src2, 0);
668 static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
670 /* sign-extend truncated operands before multiplication */
671 gen_op_multiply(dst, src1, src2, 1);
674 // 1
675 static inline void gen_op_eval_ba(TCGv dst)
677 tcg_gen_movi_tl(dst, 1);
680 // Z
681 static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
683 gen_mov_reg_Z(dst, src);
686 // Z | (N ^ V)
687 static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
689 TCGv t0 = tcg_temp_new();
690 gen_mov_reg_N(t0, src);
691 gen_mov_reg_V(dst, src);
692 tcg_gen_xor_tl(dst, dst, t0);
693 gen_mov_reg_Z(t0, src);
694 tcg_gen_or_tl(dst, dst, t0);
695 tcg_temp_free(t0);
698 // N ^ V
699 static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
701 TCGv t0 = tcg_temp_new();
702 gen_mov_reg_V(t0, src);
703 gen_mov_reg_N(dst, src);
704 tcg_gen_xor_tl(dst, dst, t0);
705 tcg_temp_free(t0);
708 // C | Z
709 static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
711 TCGv t0 = tcg_temp_new();
712 gen_mov_reg_Z(t0, src);
713 gen_mov_reg_C(dst, src);
714 tcg_gen_or_tl(dst, dst, t0);
715 tcg_temp_free(t0);
718 // C
719 static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
721 gen_mov_reg_C(dst, src);
724 // V
725 static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
727 gen_mov_reg_V(dst, src);
730 // 0
731 static inline void gen_op_eval_bn(TCGv dst)
733 tcg_gen_movi_tl(dst, 0);
736 // N
737 static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
739 gen_mov_reg_N(dst, src);
742 // !Z
743 static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
745 gen_mov_reg_Z(dst, src);
746 tcg_gen_xori_tl(dst, dst, 0x1);
749 // !(Z | (N ^ V))
750 static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
752 gen_op_eval_ble(dst, src);
753 tcg_gen_xori_tl(dst, dst, 0x1);
756 // !(N ^ V)
757 static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
759 gen_op_eval_bl(dst, src);
760 tcg_gen_xori_tl(dst, dst, 0x1);
763 // !(C | Z)
764 static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
766 gen_op_eval_bleu(dst, src);
767 tcg_gen_xori_tl(dst, dst, 0x1);
770 // !C
771 static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
773 gen_mov_reg_C(dst, src);
774 tcg_gen_xori_tl(dst, dst, 0x1);
777 // !N
778 static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
780 gen_mov_reg_N(dst, src);
781 tcg_gen_xori_tl(dst, dst, 0x1);
784 // !V
785 static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
787 gen_mov_reg_V(dst, src);
788 tcg_gen_xori_tl(dst, dst, 0x1);
791 /*
792    FPSR bit field FCC1 | FCC0:
793    0 =
794    1 <
795    2 >
796    3 unordered
797 */
798 static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
799 unsigned int fcc_offset)
801 tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
802 tcg_gen_andi_tl(reg, reg, 0x1);
805 static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
806 unsigned int fcc_offset)
808 tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
809 tcg_gen_andi_tl(reg, reg, 0x1);
812 // !0: FCC0 | FCC1
813 static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
814 unsigned int fcc_offset)
816 TCGv t0 = tcg_temp_new();
817 gen_mov_reg_FCC0(dst, src, fcc_offset);
818 gen_mov_reg_FCC1(t0, src, fcc_offset);
819 tcg_gen_or_tl(dst, dst, t0);
820 tcg_temp_free(t0);
823 // 1 or 2: FCC0 ^ FCC1
824 static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
825 unsigned int fcc_offset)
827 TCGv t0 = tcg_temp_new();
828 gen_mov_reg_FCC0(dst, src, fcc_offset);
829 gen_mov_reg_FCC1(t0, src, fcc_offset);
830 tcg_gen_xor_tl(dst, dst, t0);
831 tcg_temp_free(t0);
834 // 1 or 3: FCC0
835 static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
836 unsigned int fcc_offset)
838 gen_mov_reg_FCC0(dst, src, fcc_offset);
841 // 1: FCC0 & !FCC1
842 static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
843 unsigned int fcc_offset)
845 TCGv t0 = tcg_temp_new();
846 gen_mov_reg_FCC0(dst, src, fcc_offset);
847 gen_mov_reg_FCC1(t0, src, fcc_offset);
848 tcg_gen_andc_tl(dst, dst, t0);
849 tcg_temp_free(t0);
852 // 2 or 3: FCC1
853 static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
854 unsigned int fcc_offset)
856 gen_mov_reg_FCC1(dst, src, fcc_offset);
859 // 2: !FCC0 & FCC1
860 static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
861 unsigned int fcc_offset)
863 TCGv t0 = tcg_temp_new();
864 gen_mov_reg_FCC0(dst, src, fcc_offset);
865 gen_mov_reg_FCC1(t0, src, fcc_offset);
866 tcg_gen_andc_tl(dst, t0, dst);
867 tcg_temp_free(t0);
870 // 3: FCC0 & FCC1
871 static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
872 unsigned int fcc_offset)
874 TCGv t0 = tcg_temp_new();
875 gen_mov_reg_FCC0(dst, src, fcc_offset);
876 gen_mov_reg_FCC1(t0, src, fcc_offset);
877 tcg_gen_and_tl(dst, dst, t0);
878 tcg_temp_free(t0);
881 // 0: !(FCC0 | FCC1)
882 static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
883 unsigned int fcc_offset)
885 TCGv t0 = tcg_temp_new();
886 gen_mov_reg_FCC0(dst, src, fcc_offset);
887 gen_mov_reg_FCC1(t0, src, fcc_offset);
888 tcg_gen_or_tl(dst, dst, t0);
889 tcg_gen_xori_tl(dst, dst, 0x1);
890 tcg_temp_free(t0);
893 // 0 or 3: !(FCC0 ^ FCC1)
894 static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
895 unsigned int fcc_offset)
897 TCGv t0 = tcg_temp_new();
898 gen_mov_reg_FCC0(dst, src, fcc_offset);
899 gen_mov_reg_FCC1(t0, src, fcc_offset);
900 tcg_gen_xor_tl(dst, dst, t0);
901 tcg_gen_xori_tl(dst, dst, 0x1);
902 tcg_temp_free(t0);
905 // 0 or 2: !FCC0
906 static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
907 unsigned int fcc_offset)
909 gen_mov_reg_FCC0(dst, src, fcc_offset);
910 tcg_gen_xori_tl(dst, dst, 0x1);
913 // !1: !(FCC0 & !FCC1)
914 static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
915 unsigned int fcc_offset)
917 TCGv t0 = tcg_temp_new();
918 gen_mov_reg_FCC0(dst, src, fcc_offset);
919 gen_mov_reg_FCC1(t0, src, fcc_offset);
920 tcg_gen_andc_tl(dst, dst, t0);
921 tcg_gen_xori_tl(dst, dst, 0x1);
922 tcg_temp_free(t0);
925 // 0 or 1: !FCC1
926 static inline void gen_op_eval_fble(TCGv dst, TCGv src,
927 unsigned int fcc_offset)
929 gen_mov_reg_FCC1(dst, src, fcc_offset);
930 tcg_gen_xori_tl(dst, dst, 0x1);
933 // !2: !(!FCC0 & FCC1)
934 static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
935 unsigned int fcc_offset)
937 TCGv t0 = tcg_temp_new();
938 gen_mov_reg_FCC0(dst, src, fcc_offset);
939 gen_mov_reg_FCC1(t0, src, fcc_offset);
940 tcg_gen_andc_tl(dst, t0, dst);
941 tcg_gen_xori_tl(dst, dst, 0x1);
942 tcg_temp_free(t0);
945 // !3: !(FCC0 & FCC1)
946 static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
947 unsigned int fcc_offset)
949 TCGv t0 = tcg_temp_new();
950 gen_mov_reg_FCC0(dst, src, fcc_offset);
951 gen_mov_reg_FCC1(t0, src, fcc_offset);
952 tcg_gen_and_tl(dst, dst, t0);
953 tcg_gen_xori_tl(dst, dst, 0x1);
954 tcg_temp_free(t0);
957 static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
958 target_ulong pc2, TCGv r_cond)
960 TCGLabel *l1 = gen_new_label();
962 tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
964 gen_goto_tb(dc, 0, pc1, pc1 + 4);
966 gen_set_label(l1);
967 gen_goto_tb(dc, 1, pc2, pc2 + 4);
970 static void gen_branch_a(DisasContext *dc, target_ulong pc1)
972 TCGLabel *l1 = gen_new_label();
973 target_ulong npc = dc->npc;
975 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);
977 gen_goto_tb(dc, 0, npc, pc1);
979 gen_set_label(l1);
980 gen_goto_tb(dc, 1, npc + 4, npc + 8);
982 dc->is_br = 1;
985 static void gen_branch_n(DisasContext *dc, target_ulong pc1)
987 target_ulong npc = dc->npc;
989 if (likely(npc != DYNAMIC_PC)) {
990 dc->pc = npc;
991 dc->jump_pc[0] = pc1;
992 dc->jump_pc[1] = npc + 4;
993 dc->npc = JUMP_PC;
994 } else {
995 TCGv t, z;
997 tcg_gen_mov_tl(cpu_pc, cpu_npc);
999 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1000 t = tcg_const_tl(pc1);
1001 z = tcg_const_tl(0);
1002 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
1003 tcg_temp_free(t);
1004 tcg_temp_free(z);
1006 dc->pc = DYNAMIC_PC;
1010 static inline void gen_generic_branch(DisasContext *dc)
1012 TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
1013 TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
1014 TCGv zero = tcg_const_tl(0);
1016 tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
1018 tcg_temp_free(npc0);
1019 tcg_temp_free(npc1);
1020 tcg_temp_free(zero);
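/* I.e. when npc was deferred as JUMP_PC, the movcond above materializes
 * the real next pc from cpu_cond: taken -> jump_pc[0], fall-through ->
 * jump_pc[1].  After this, npc can be treated as DYNAMIC_PC. */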
1023 /* call this function before overwriting the condition register (cpu_cond),
1024    as it may still be needed to resolve a pending JUMP_PC npc */
1025 static inline void flush_cond(DisasContext *dc)
1027 if (dc->npc == JUMP_PC) {
1028 gen_generic_branch(dc);
1029 dc->npc = DYNAMIC_PC;
1033 static inline void save_npc(DisasContext *dc)
1035 if (dc->npc == JUMP_PC) {
1036 gen_generic_branch(dc);
1037 dc->npc = DYNAMIC_PC;
1038 } else if (dc->npc != DYNAMIC_PC) {
1039 tcg_gen_movi_tl(cpu_npc, dc->npc);
1043 static inline void update_psr(DisasContext *dc)
1045 if (dc->cc_op != CC_OP_FLAGS) {
1046 dc->cc_op = CC_OP_FLAGS;
1047 gen_helper_compute_psr(cpu_env);
1051 static inline void save_state(DisasContext *dc)
1053 tcg_gen_movi_tl(cpu_pc, dc->pc);
1054 save_npc(dc);
1057 static void gen_exception(DisasContext *dc, int which)
1059 TCGv_i32 t;
1061 save_state(dc);
1062 t = tcg_const_i32(which);
1063 gen_helper_raise_exception(cpu_env, t);
1064 tcg_temp_free_i32(t);
1065 dc->is_br = 1;
1068 static void gen_check_align(TCGv addr, int mask)
1070 TCGv_i32 r_mask = tcg_const_i32(mask);
1071 gen_helper_check_align(cpu_env, addr, r_mask);
1072 tcg_temp_free_i32(r_mask);
1075 static inline void gen_mov_pc_npc(DisasContext *dc)
1077 if (dc->npc == JUMP_PC) {
1078 gen_generic_branch(dc);
1079 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1080 dc->pc = DYNAMIC_PC;
1081 } else if (dc->npc == DYNAMIC_PC) {
1082 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1083 dc->pc = DYNAMIC_PC;
1084 } else {
1085 dc->pc = dc->npc;
1089 static inline void gen_op_next_insn(void)
1091 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1092 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
1095 static void free_compare(DisasCompare *cmp)
1097 if (!cmp->g1) {
1098 tcg_temp_free(cmp->c1);
1100 if (!cmp->g2) {
1101 tcg_temp_free(cmp->c2);
1105 static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
1106 DisasContext *dc)
1108 static int subcc_cond[16] = {
1109 TCG_COND_NEVER,
1110 TCG_COND_EQ,
1111 TCG_COND_LE,
1112 TCG_COND_LT,
1113 TCG_COND_LEU,
1114 TCG_COND_LTU,
1115 -1, /* neg */
1116 -1, /* overflow */
1117 TCG_COND_ALWAYS,
1118 TCG_COND_NE,
1119 TCG_COND_GT,
1120 TCG_COND_GE,
1121 TCG_COND_GTU,
1122 TCG_COND_GEU,
1123 -1, /* pos */
1124 -1, /* no overflow */
1127 static int logic_cond[16] = {
1128 TCG_COND_NEVER,
1129 TCG_COND_EQ, /* eq: Z */
1130 TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
1131 TCG_COND_LT, /* lt: N ^ V -> N */
1132 TCG_COND_EQ, /* leu: C | Z -> Z */
1133 TCG_COND_NEVER, /* ltu: C -> 0 */
1134 TCG_COND_LT, /* neg: N */
1135 TCG_COND_NEVER, /* vs: V -> 0 */
1136 TCG_COND_ALWAYS,
1137 TCG_COND_NE, /* ne: !Z */
1138 TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1139 TCG_COND_GE, /* ge: !(N ^ V) -> !N */
1140 TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
1141 TCG_COND_ALWAYS, /* geu: !C -> 1 */
1142 TCG_COND_GE, /* pos: !N */
1143 TCG_COND_ALWAYS, /* vc: !V -> 1 */
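/* Reading the tables: for CC_OP_SUB the flags describe cc_src - cc_src2,
 * so e.g. cond 0x3 (bl, N ^ V) becomes a plain signed TCG_COND_LT on
 * (cc_src, cc_src2).  For CC_OP_LOGIC the result was compared against
 * zero with C and V known clear, so only tests of cc_dst against 0
 * survive. */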
1146 TCGv_i32 r_src;
1147 TCGv r_dst;
1149 #ifdef TARGET_SPARC64
1150 if (xcc) {
1151 r_src = cpu_xcc;
1152 } else {
1153 r_src = cpu_psr;
1155 #else
1156 r_src = cpu_psr;
1157 #endif
1159 switch (dc->cc_op) {
1160 case CC_OP_LOGIC:
1161 cmp->cond = logic_cond[cond];
1162 do_compare_dst_0:
1163 cmp->is_bool = false;
1164 cmp->g2 = false;
1165 cmp->c2 = tcg_const_tl(0);
1166 #ifdef TARGET_SPARC64
1167 if (!xcc) {
1168 cmp->g1 = false;
1169 cmp->c1 = tcg_temp_new();
1170 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
1171 break;
1173 #endif
1174 cmp->g1 = true;
1175 cmp->c1 = cpu_cc_dst;
1176 break;
1178 case CC_OP_SUB:
1179 switch (cond) {
1180 case 6: /* neg */
1181 case 14: /* pos */
1182 cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
1183 goto do_compare_dst_0;
1185 case 7: /* overflow */
1186 case 15: /* !overflow */
1187 goto do_dynamic;
1189 default:
1190 cmp->cond = subcc_cond[cond];
1191 cmp->is_bool = false;
1192 #ifdef TARGET_SPARC64
1193 if (!xcc) {
1194 /* Note that sign-extension works for unsigned compares as
1195 long as both operands are sign-extended. */
1196 cmp->g1 = cmp->g2 = false;
1197 cmp->c1 = tcg_temp_new();
1198 cmp->c2 = tcg_temp_new();
1199 tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
1200 tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
1201 break;
1203 #endif
1204 cmp->g1 = cmp->g2 = true;
1205 cmp->c1 = cpu_cc_src;
1206 cmp->c2 = cpu_cc_src2;
1207 break;
1209 break;
1211 default:
1212 do_dynamic:
1213 gen_helper_compute_psr(cpu_env);
1214 dc->cc_op = CC_OP_FLAGS;
1215 /* FALLTHRU */
1217 case CC_OP_FLAGS:
1218 /* We're going to generate a boolean result. */
1219 cmp->cond = TCG_COND_NE;
1220 cmp->is_bool = true;
1221 cmp->g1 = cmp->g2 = false;
1222 cmp->c1 = r_dst = tcg_temp_new();
1223 cmp->c2 = tcg_const_tl(0);
1225 switch (cond) {
1226 case 0x0:
1227 gen_op_eval_bn(r_dst);
1228 break;
1229 case 0x1:
1230 gen_op_eval_be(r_dst, r_src);
1231 break;
1232 case 0x2:
1233 gen_op_eval_ble(r_dst, r_src);
1234 break;
1235 case 0x3:
1236 gen_op_eval_bl(r_dst, r_src);
1237 break;
1238 case 0x4:
1239 gen_op_eval_bleu(r_dst, r_src);
1240 break;
1241 case 0x5:
1242 gen_op_eval_bcs(r_dst, r_src);
1243 break;
1244 case 0x6:
1245 gen_op_eval_bneg(r_dst, r_src);
1246 break;
1247 case 0x7:
1248 gen_op_eval_bvs(r_dst, r_src);
1249 break;
1250 case 0x8:
1251 gen_op_eval_ba(r_dst);
1252 break;
1253 case 0x9:
1254 gen_op_eval_bne(r_dst, r_src);
1255 break;
1256 case 0xa:
1257 gen_op_eval_bg(r_dst, r_src);
1258 break;
1259 case 0xb:
1260 gen_op_eval_bge(r_dst, r_src);
1261 break;
1262 case 0xc:
1263 gen_op_eval_bgu(r_dst, r_src);
1264 break;
1265 case 0xd:
1266 gen_op_eval_bcc(r_dst, r_src);
1267 break;
1268 case 0xe:
1269 gen_op_eval_bpos(r_dst, r_src);
1270 break;
1271 case 0xf:
1272 gen_op_eval_bvc(r_dst, r_src);
1273 break;
1275 break;
1279 static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
1281 unsigned int offset;
1282 TCGv r_dst;
1284 /* For now we still generate a straight boolean result. */
1285 cmp->cond = TCG_COND_NE;
1286 cmp->is_bool = true;
1287 cmp->g1 = cmp->g2 = false;
1288 cmp->c1 = r_dst = tcg_temp_new();
1289 cmp->c2 = tcg_const_tl(0);
1291 switch (cc) {
1292 default:
1293 case 0x0:
1294 offset = 0;
1295 break;
1296 case 0x1:
1297 offset = 32 - 10;
1298 break;
1299 case 0x2:
1300 offset = 34 - 10;
1301 break;
1302 case 0x3:
1303 offset = 36 - 10;
1304 break;
1307 switch (cond) {
1308 case 0x0:
1309 gen_op_eval_bn(r_dst);
1310 break;
1311 case 0x1:
1312 gen_op_eval_fbne(r_dst, cpu_fsr, offset);
1313 break;
1314 case 0x2:
1315 gen_op_eval_fblg(r_dst, cpu_fsr, offset);
1316 break;
1317 case 0x3:
1318 gen_op_eval_fbul(r_dst, cpu_fsr, offset);
1319 break;
1320 case 0x4:
1321 gen_op_eval_fbl(r_dst, cpu_fsr, offset);
1322 break;
1323 case 0x5:
1324 gen_op_eval_fbug(r_dst, cpu_fsr, offset);
1325 break;
1326 case 0x6:
1327 gen_op_eval_fbg(r_dst, cpu_fsr, offset);
1328 break;
1329 case 0x7:
1330 gen_op_eval_fbu(r_dst, cpu_fsr, offset);
1331 break;
1332 case 0x8:
1333 gen_op_eval_ba(r_dst);
1334 break;
1335 case 0x9:
1336 gen_op_eval_fbe(r_dst, cpu_fsr, offset);
1337 break;
1338 case 0xa:
1339 gen_op_eval_fbue(r_dst, cpu_fsr, offset);
1340 break;
1341 case 0xb:
1342 gen_op_eval_fbge(r_dst, cpu_fsr, offset);
1343 break;
1344 case 0xc:
1345 gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
1346 break;
1347 case 0xd:
1348 gen_op_eval_fble(r_dst, cpu_fsr, offset);
1349 break;
1350 case 0xe:
1351 gen_op_eval_fbule(r_dst, cpu_fsr, offset);
1352 break;
1353 case 0xf:
1354 gen_op_eval_fbo(r_dst, cpu_fsr, offset);
1355 break;
1359 static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
1360 DisasContext *dc)
1362 DisasCompare cmp;
1363 gen_compare(&cmp, cc, cond, dc);
1365 /* The interface is to return a boolean in r_dst. */
1366 if (cmp.is_bool) {
1367 tcg_gen_mov_tl(r_dst, cmp.c1);
1368 } else {
1369 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1372 free_compare(&cmp);
1375 static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
1377 DisasCompare cmp;
1378 gen_fcompare(&cmp, cc, cond);
1380 /* The interface is to return a boolean in r_dst. */
1381 if (cmp.is_bool) {
1382 tcg_gen_mov_tl(r_dst, cmp.c1);
1383 } else {
1384 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1387 free_compare(&cmp);
1390 #ifdef TARGET_SPARC64
1391 // Inverted logic
1392 static const int gen_tcg_cond_reg[8] = {
1393     -1,
1394     TCG_COND_NE,
1395     TCG_COND_GT,
1396     TCG_COND_GE,
1397     -1,
1398     TCG_COND_EQ,
1399     TCG_COND_LE,
1400     TCG_COND_LT,
1403 static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1405 cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
1406 cmp->is_bool = false;
1407 cmp->g1 = true;
1408 cmp->g2 = false;
1409 cmp->c1 = r_src;
1410 cmp->c2 = tcg_const_tl(0);
1413 static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
1415 DisasCompare cmp;
1416 gen_compare_reg(&cmp, cond, r_src);
1418 /* The interface is to return a boolean in r_dst. */
1419 tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
1421 free_compare(&cmp);
1423 #endif
1425 static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1427 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1428 target_ulong target = dc->pc + offset;
1430 #ifdef TARGET_SPARC64
1431 if (unlikely(AM_CHECK(dc))) {
1432 target &= 0xffffffffULL;
1434 #endif
1435 if (cond == 0x0) {
1436 /* unconditional not taken */
1437 if (a) {
1438 dc->pc = dc->npc + 4;
1439 dc->npc = dc->pc + 4;
1440 } else {
1441 dc->pc = dc->npc;
1442 dc->npc = dc->pc + 4;
1444 } else if (cond == 0x8) {
1445 /* unconditional taken */
1446 if (a) {
1447 dc->pc = target;
1448 dc->npc = dc->pc + 4;
1449 } else {
1450 dc->pc = dc->npc;
1451 dc->npc = target;
1452 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1454 } else {
1455 flush_cond(dc);
1456 gen_cond(cpu_cond, cc, cond, dc);
1457 if (a) {
1458 gen_branch_a(dc, target);
1459 } else {
1460 gen_branch_n(dc, target);
1465 static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
1467 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
1468 target_ulong target = dc->pc + offset;
1470 #ifdef TARGET_SPARC64
1471 if (unlikely(AM_CHECK(dc))) {
1472 target &= 0xffffffffULL;
1474 #endif
1475 if (cond == 0x0) {
1476 /* unconditional not taken */
1477 if (a) {
1478 dc->pc = dc->npc + 4;
1479 dc->npc = dc->pc + 4;
1480 } else {
1481 dc->pc = dc->npc;
1482 dc->npc = dc->pc + 4;
1484 } else if (cond == 0x8) {
1485 /* unconditional taken */
1486 if (a) {
1487 dc->pc = target;
1488 dc->npc = dc->pc + 4;
1489 } else {
1490 dc->pc = dc->npc;
1491 dc->npc = target;
1492 tcg_gen_mov_tl(cpu_pc, cpu_npc);
1494 } else {
1495 flush_cond(dc);
1496 gen_fcond(cpu_cond, cc, cond);
1497 if (a) {
1498 gen_branch_a(dc, target);
1499 } else {
1500 gen_branch_n(dc, target);
1505 #ifdef TARGET_SPARC64
1506 static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
1507 TCGv r_reg)
1509 unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
1510 target_ulong target = dc->pc + offset;
1512 if (unlikely(AM_CHECK(dc))) {
1513 target &= 0xffffffffULL;
1515 flush_cond(dc);
1516 gen_cond_reg(cpu_cond, cond, r_reg);
1517 if (a) {
1518 gen_branch_a(dc, target);
1519 } else {
1520 gen_branch_n(dc, target);
1524 static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1526 switch (fccno) {
1527 case 0:
1528 gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
1529 break;
1530 case 1:
1531 gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1532 break;
1533 case 2:
1534 gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1535 break;
1536 case 3:
1537 gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1538 break;
1542 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1544 switch (fccno) {
1545 case 0:
1546 gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
1547 break;
1548 case 1:
1549 gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1550 break;
1551 case 2:
1552 gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1553 break;
1554 case 3:
1555 gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1556 break;
1560 static inline void gen_op_fcmpq(int fccno)
1562 switch (fccno) {
1563 case 0:
1564 gen_helper_fcmpq(cpu_fsr, cpu_env);
1565 break;
1566 case 1:
1567 gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
1568 break;
1569 case 2:
1570 gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
1571 break;
1572 case 3:
1573 gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
1574 break;
1578 static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
1580 switch (fccno) {
1581 case 0:
1582 gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
1583 break;
1584 case 1:
1585 gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1586 break;
1587 case 2:
1588 gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1589 break;
1590 case 3:
1591 gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1592 break;
1596 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1598 switch (fccno) {
1599 case 0:
1600 gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
1601 break;
1602 case 1:
1603 gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
1604 break;
1605 case 2:
1606 gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
1607 break;
1608 case 3:
1609 gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
1610 break;
1614 static inline void gen_op_fcmpeq(int fccno)
1616 switch (fccno) {
1617 case 0:
1618 gen_helper_fcmpeq(cpu_fsr, cpu_env);
1619 break;
1620 case 1:
1621 gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
1622 break;
1623 case 2:
1624 gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
1625 break;
1626 case 3:
1627 gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
1628 break;
1632 #else
1634 static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
1636 gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
1639 static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1641 gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
1644 static inline void gen_op_fcmpq(int fccno)
1646 gen_helper_fcmpq(cpu_fsr, cpu_env);
1649 static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
1651 gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
1654 static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
1656 gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
1659 static inline void gen_op_fcmpeq(int fccno)
1661 gen_helper_fcmpeq(cpu_fsr, cpu_env);
1663 #endif
1665 static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
1667 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
1668 tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
1669 gen_exception(dc, TT_FP_EXCP);
1672 static int gen_trap_ifnofpu(DisasContext *dc)
1674 #if !defined(CONFIG_USER_ONLY)
1675 if (!dc->fpu_enabled) {
1676 gen_exception(dc, TT_NFPU_INSN);
1677 return 1;
1679 #endif
1680 return 0;
1683 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1685 tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
1688 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
1689 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
1691 TCGv_i32 dst, src;
1693 src = gen_load_fpr_F(dc, rs);
1694 dst = gen_dest_fpr_F(dc);
1696 gen(dst, cpu_env, src);
1697 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1699 gen_store_fpr_F(dc, rd, dst);
1702 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
1703 void (*gen)(TCGv_i32, TCGv_i32))
1705 TCGv_i32 dst, src;
1707 src = gen_load_fpr_F(dc, rs);
1708 dst = gen_dest_fpr_F(dc);
1710 gen(dst, src);
1712 gen_store_fpr_F(dc, rd, dst);
1715 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1716 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
1718 TCGv_i32 dst, src1, src2;
1720 src1 = gen_load_fpr_F(dc, rs1);
1721 src2 = gen_load_fpr_F(dc, rs2);
1722 dst = gen_dest_fpr_F(dc);
1724 gen(dst, cpu_env, src1, src2);
1725 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1727 gen_store_fpr_F(dc, rd, dst);
1730 #ifdef TARGET_SPARC64
1731 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
1732 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
1734 TCGv_i32 dst, src1, src2;
1736 src1 = gen_load_fpr_F(dc, rs1);
1737 src2 = gen_load_fpr_F(dc, rs2);
1738 dst = gen_dest_fpr_F(dc);
1740 gen(dst, src1, src2);
1742 gen_store_fpr_F(dc, rd, dst);
1744 #endif
1746 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
1747 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
1749 TCGv_i64 dst, src;
1751 src = gen_load_fpr_D(dc, rs);
1752 dst = gen_dest_fpr_D(dc, rd);
1754 gen(dst, cpu_env, src);
1755 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1757 gen_store_fpr_D(dc, rd, dst);
1760 #ifdef TARGET_SPARC64
1761 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
1762 void (*gen)(TCGv_i64, TCGv_i64))
1764 TCGv_i64 dst, src;
1766 src = gen_load_fpr_D(dc, rs);
1767 dst = gen_dest_fpr_D(dc, rd);
1769 gen(dst, src);
1771 gen_store_fpr_D(dc, rd, dst);
1773 #endif
1775 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1776 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
1778 TCGv_i64 dst, src1, src2;
1780 src1 = gen_load_fpr_D(dc, rs1);
1781 src2 = gen_load_fpr_D(dc, rs2);
1782 dst = gen_dest_fpr_D(dc, rd);
1784 gen(dst, cpu_env, src1, src2);
1785 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1787 gen_store_fpr_D(dc, rd, dst);
1790 #ifdef TARGET_SPARC64
1791 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1792 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
1794 TCGv_i64 dst, src1, src2;
1796 src1 = gen_load_fpr_D(dc, rs1);
1797 src2 = gen_load_fpr_D(dc, rs2);
1798 dst = gen_dest_fpr_D(dc, rd);
1800 gen(dst, src1, src2);
1802 gen_store_fpr_D(dc, rd, dst);
1805 static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
1806 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1808 TCGv_i64 dst, src1, src2;
1810 src1 = gen_load_fpr_D(dc, rs1);
1811 src2 = gen_load_fpr_D(dc, rs2);
1812 dst = gen_dest_fpr_D(dc, rd);
1814 gen(dst, cpu_gsr, src1, src2);
1816 gen_store_fpr_D(dc, rd, dst);
1819 static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
1820 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
1822 TCGv_i64 dst, src0, src1, src2;
1824 src1 = gen_load_fpr_D(dc, rs1);
1825 src2 = gen_load_fpr_D(dc, rs2);
1826 src0 = gen_load_fpr_D(dc, rd);
1827 dst = gen_dest_fpr_D(dc, rd);
1829 gen(dst, src0, src1, src2);
1831 gen_store_fpr_D(dc, rd, dst);
1833 #endif
1835 static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
1836 void (*gen)(TCGv_ptr))
1838 gen_op_load_fpr_QT1(QFPREG(rs));
1840 gen(cpu_env);
1841 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1843 gen_op_store_QT0_fpr(QFPREG(rd));
1844 gen_update_fprs_dirty(dc, QFPREG(rd));
1847 #ifdef TARGET_SPARC64
1848 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
1849 void (*gen)(TCGv_ptr))
1851 gen_op_load_fpr_QT1(QFPREG(rs));
1853 gen(cpu_env);
1855 gen_op_store_QT0_fpr(QFPREG(rd));
1856 gen_update_fprs_dirty(dc, QFPREG(rd));
1858 #endif
1860 static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
1861 void (*gen)(TCGv_ptr))
1863 gen_op_load_fpr_QT0(QFPREG(rs1));
1864 gen_op_load_fpr_QT1(QFPREG(rs2));
1866 gen(cpu_env);
1867 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1869 gen_op_store_QT0_fpr(QFPREG(rd));
1870 gen_update_fprs_dirty(dc, QFPREG(rd));
1873 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
1874 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
1876 TCGv_i64 dst;
1877 TCGv_i32 src1, src2;
1879 src1 = gen_load_fpr_F(dc, rs1);
1880 src2 = gen_load_fpr_F(dc, rs2);
1881 dst = gen_dest_fpr_D(dc, rd);
1883 gen(dst, cpu_env, src1, src2);
1884 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1886 gen_store_fpr_D(dc, rd, dst);
1889 static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
1890 void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
1892 TCGv_i64 src1, src2;
1894 src1 = gen_load_fpr_D(dc, rs1);
1895 src2 = gen_load_fpr_D(dc, rs2);
1897 gen(cpu_env, src1, src2);
1898 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1900 gen_op_store_QT0_fpr(QFPREG(rd));
1901 gen_update_fprs_dirty(dc, QFPREG(rd));
1904 #ifdef TARGET_SPARC64
1905 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
1906 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1908 TCGv_i64 dst;
1909 TCGv_i32 src;
1911 src = gen_load_fpr_F(dc, rs);
1912 dst = gen_dest_fpr_D(dc, rd);
1914 gen(dst, cpu_env, src);
1915 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1917 gen_store_fpr_D(dc, rd, dst);
1919 #endif
1921 static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
1922 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
1924 TCGv_i64 dst;
1925 TCGv_i32 src;
1927 src = gen_load_fpr_F(dc, rs);
1928 dst = gen_dest_fpr_D(dc, rd);
1930 gen(dst, cpu_env, src);
1932 gen_store_fpr_D(dc, rd, dst);
1935 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
1936 void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
1938 TCGv_i32 dst;
1939 TCGv_i64 src;
1941 src = gen_load_fpr_D(dc, rs);
1942 dst = gen_dest_fpr_F(dc);
1944 gen(dst, cpu_env, src);
1945 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1947 gen_store_fpr_F(dc, rd, dst);
1950 static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
1951 void (*gen)(TCGv_i32, TCGv_ptr))
1953 TCGv_i32 dst;
1955 gen_op_load_fpr_QT1(QFPREG(rs));
1956 dst = gen_dest_fpr_F(dc);
1958 gen(dst, cpu_env);
1959 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1961 gen_store_fpr_F(dc, rd, dst);
1964 static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
1965 void (*gen)(TCGv_i64, TCGv_ptr))
1967 TCGv_i64 dst;
1969 gen_op_load_fpr_QT1(QFPREG(rs));
1970 dst = gen_dest_fpr_D(dc, rd);
1972 gen(dst, cpu_env);
1973 gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
1975 gen_store_fpr_D(dc, rd, dst);
1978 static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
1979 void (*gen)(TCGv_ptr, TCGv_i32))
1981 TCGv_i32 src;
1983 src = gen_load_fpr_F(dc, rs);
1985 gen(cpu_env, src);
1987 gen_op_store_QT0_fpr(QFPREG(rd));
1988 gen_update_fprs_dirty(dc, QFPREG(rd));
1991 static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
1992 void (*gen)(TCGv_ptr, TCGv_i64))
1994 TCGv_i64 src;
1996 src = gen_load_fpr_D(dc, rs);
1998 gen(cpu_env, src);
2000 gen_op_store_QT0_fpr(QFPREG(rd));
2001 gen_update_fprs_dirty(dc, QFPREG(rd));
2004 /* asi moves */
2005 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2006 typedef enum {
2007 GET_ASI_HELPER,
2008 GET_ASI_EXCP,
2009 GET_ASI_DIRECT,
2010 GET_ASI_DTWINX,
2011 GET_ASI_BLOCK,
2012 GET_ASI_SHORT,
2013 } ASIType;
2015 typedef struct {
2016 ASIType type;
2017 int asi;
2018 int mem_idx;
2019 TCGMemOp memop;
2020 } DisasASI;
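/* Decode sketch (illustrative, assuming asi.h's ASI_M_BYPASS == 0x20):
 * a v8 supervisor "lda [%o0] 0x20, %o1" is returned by get_asi() below as
 *   { GET_ASI_DIRECT, 0x20, MMU_PHYS_IDX, MO_TEUL }
 * so the bypass access becomes a direct load through the physical-address
 * MMU index instead of going through the slow ld_asi helper. */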
2022 static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
2024 int asi = GET_FIELD(insn, 19, 26);
2025 ASIType type = GET_ASI_HELPER;
2026 int mem_idx = dc->mem_idx;
2028 #ifndef TARGET_SPARC64
2029 /* Before v9, all asis are immediate and privileged. */
2030 if (IS_IMM) {
2031 gen_exception(dc, TT_ILL_INSN);
2032 type = GET_ASI_EXCP;
2033 } else if (supervisor(dc)
2034 /* Note that LEON accepts ASI_USERDATA in user mode, for
2035 use with CASA. Also note that previous versions of
2036 QEMU allowed (and old versions of gcc emitted) ASI_P
2037 for LEON, which is incorrect. */
2038 || (asi == ASI_USERDATA
2039 && (dc->def->features & CPU_FEATURE_CASA))) {
2040 switch (asi) {
2041 case ASI_USERDATA: /* User data access */
2042 mem_idx = MMU_USER_IDX;
2043 type = GET_ASI_DIRECT;
2044 break;
2045 case ASI_KERNELDATA: /* Supervisor data access */
2046 mem_idx = MMU_KERNEL_IDX;
2047 type = GET_ASI_DIRECT;
2048 break;
2049 case ASI_M_BYPASS: /* MMU passthrough */
2050 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
2051 mem_idx = MMU_PHYS_IDX;
2052 type = GET_ASI_DIRECT;
2053 break;
2055 } else {
2056 gen_exception(dc, TT_PRIV_INSN);
2057 type = GET_ASI_EXCP;
2059 #else
2060 if (IS_IMM) {
2061 asi = dc->asi;
2063 /* With v9, all asis below 0x80 are privileged. */
2064 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
2065 down that bit into DisasContext. For the moment that's ok,
2066          since the direct implementations below don't have any ASIs
2067 in the restricted [0x30, 0x7f] range, and the check will be
2068 done properly in the helper. */
2069 if (!supervisor(dc) && asi < 0x80) {
2070 gen_exception(dc, TT_PRIV_ACT);
2071 type = GET_ASI_EXCP;
2072 } else {
2073 switch (asi) {
2074 case ASI_REAL: /* Bypass */
2075 case ASI_REAL_IO: /* Bypass, non-cacheable */
2076 case ASI_REAL_L: /* Bypass LE */
2077 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
2078 case ASI_TWINX_REAL: /* Real address, twinx */
2079 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
2080 mem_idx = MMU_PHYS_IDX;
2081 break;
2082 case ASI_N: /* Nucleus */
2083 case ASI_NL: /* Nucleus LE */
2084 case ASI_TWINX_N:
2085 case ASI_TWINX_NL:
2086 mem_idx = MMU_NUCLEUS_IDX;
2087 break;
2088 case ASI_AIUP: /* As if user primary */
2089 case ASI_AIUPL: /* As if user primary LE */
2090 case ASI_TWINX_AIUP:
2091 case ASI_TWINX_AIUP_L:
2092 case ASI_BLK_AIUP_4V:
2093 case ASI_BLK_AIUP_L_4V:
2094 case ASI_BLK_AIUP:
2095 case ASI_BLK_AIUPL:
2096 mem_idx = MMU_USER_IDX;
2097 break;
2098 case ASI_AIUS: /* As if user secondary */
2099 case ASI_AIUSL: /* As if user secondary LE */
2100 case ASI_TWINX_AIUS:
2101 case ASI_TWINX_AIUS_L:
2102 case ASI_BLK_AIUS_4V:
2103 case ASI_BLK_AIUS_L_4V:
2104 case ASI_BLK_AIUS:
2105 case ASI_BLK_AIUSL:
2106 mem_idx = MMU_USER_SECONDARY_IDX;
2107 break;
2108 case ASI_S: /* Secondary */
2109 case ASI_SL: /* Secondary LE */
2110 case ASI_TWINX_S:
2111 case ASI_TWINX_SL:
2112 case ASI_BLK_COMMIT_S:
2113 case ASI_BLK_S:
2114 case ASI_BLK_SL:
2115 case ASI_FL8_S:
2116 case ASI_FL8_SL:
2117 case ASI_FL16_S:
2118 case ASI_FL16_SL:
2119 if (mem_idx == MMU_USER_IDX) {
2120 mem_idx = MMU_USER_SECONDARY_IDX;
2121 } else if (mem_idx == MMU_KERNEL_IDX) {
2122 mem_idx = MMU_KERNEL_SECONDARY_IDX;
2124 break;
2125 case ASI_P: /* Primary */
2126 case ASI_PL: /* Primary LE */
2127 case ASI_TWINX_P:
2128 case ASI_TWINX_PL:
2129 case ASI_BLK_COMMIT_P:
2130 case ASI_BLK_P:
2131 case ASI_BLK_PL:
2132 case ASI_FL8_P:
2133 case ASI_FL8_PL:
2134 case ASI_FL16_P:
2135 case ASI_FL16_PL:
2136 break;
2138 switch (asi) {
2139 case ASI_REAL:
2140 case ASI_REAL_IO:
2141 case ASI_REAL_L:
2142 case ASI_REAL_IO_L:
2143 case ASI_N:
2144 case ASI_NL:
2145 case ASI_AIUP:
2146 case ASI_AIUPL:
2147 case ASI_AIUS:
2148 case ASI_AIUSL:
2149 case ASI_S:
2150 case ASI_SL:
2151 case ASI_P:
2152 case ASI_PL:
2153 type = GET_ASI_DIRECT;
2154 break;
2155 case ASI_TWINX_REAL:
2156 case ASI_TWINX_REAL_L:
2157 case ASI_TWINX_N:
2158 case ASI_TWINX_NL:
2159 case ASI_TWINX_AIUP:
2160 case ASI_TWINX_AIUP_L:
2161 case ASI_TWINX_AIUS:
2162 case ASI_TWINX_AIUS_L:
2163 case ASI_TWINX_P:
2164 case ASI_TWINX_PL:
2165 case ASI_TWINX_S:
2166 case ASI_TWINX_SL:
2167 type = GET_ASI_DTWINX;
2168 break;
2169 case ASI_BLK_COMMIT_P:
2170 case ASI_BLK_COMMIT_S:
2171 case ASI_BLK_AIUP_4V:
2172 case ASI_BLK_AIUP_L_4V:
2173 case ASI_BLK_AIUP:
2174 case ASI_BLK_AIUPL:
2175 case ASI_BLK_AIUS_4V:
2176 case ASI_BLK_AIUS_L_4V:
2177 case ASI_BLK_AIUS:
2178 case ASI_BLK_AIUSL:
2179 case ASI_BLK_S:
2180 case ASI_BLK_SL:
2181 case ASI_BLK_P:
2182 case ASI_BLK_PL:
2183 type = GET_ASI_BLOCK;
2184 break;
2185 case ASI_FL8_S:
2186 case ASI_FL8_SL:
2187 case ASI_FL8_P:
2188 case ASI_FL8_PL:
2189 memop = MO_UB;
2190 type = GET_ASI_SHORT;
2191 break;
2192 case ASI_FL16_S:
2193 case ASI_FL16_SL:
2194 case ASI_FL16_P:
2195 case ASI_FL16_PL:
2196 memop = MO_TEUW;
2197 type = GET_ASI_SHORT;
2198 break;
2200 /* The little-endian asis all have bit 3 set. */
2201 if (asi & 8) {
2202 memop ^= MO_BSWAP;
2205 #endif
2207 return (DisasASI){ type, asi, mem_idx, memop };
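/* Example of the little-endian adjustment above: ASI_PL is ASI_P | 8,
 * so a 32-bit access's MO_TEUL (big-endian on SPARC) is XORed with
 * MO_BSWAP and the resulting load/store is byte-swapped. */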
2210 static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
2211 int insn, TCGMemOp memop)
2213 DisasASI da = get_asi(dc, insn, memop);
2215 switch (da.type) {
2216 case GET_ASI_EXCP:
2217 break;
2218 case GET_ASI_DTWINX: /* Reserved for ldda. */
2219 gen_exception(dc, TT_ILL_INSN);
2220 break;
2221 case GET_ASI_DIRECT:
2222 gen_address_mask(dc, addr);
2223 tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
2224 break;
2225 default:
2227 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2228 TCGv_i32 r_mop = tcg_const_i32(memop);
2230 save_state(dc);
2231 #ifdef TARGET_SPARC64
2232 gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
2233 #else
2235 TCGv_i64 t64 = tcg_temp_new_i64();
2236 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2237 tcg_gen_trunc_i64_tl(dst, t64);
2238 tcg_temp_free_i64(t64);
2240 #endif
2241 tcg_temp_free_i32(r_mop);
2242 tcg_temp_free_i32(r_asi);
2244 break;
2248 static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
2249 int insn, TCGMemOp memop)
2251 DisasASI da = get_asi(dc, insn, memop);
2253 switch (da.type) {
2254 case GET_ASI_EXCP:
2255 break;
2256 case GET_ASI_DTWINX: /* Reserved for stda. */
2257 gen_exception(dc, TT_ILL_INSN);
2258 break;
2259 case GET_ASI_DIRECT:
2260 gen_address_mask(dc, addr);
2261 tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
2262 break;
2263 default:
2265 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2266 TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);
2268 save_state(dc);
2269 #ifdef TARGET_SPARC64
2270 gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
2271 #else
2273 TCGv_i64 t64 = tcg_temp_new_i64();
2274 tcg_gen_extu_tl_i64(t64, src);
2275 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2276 tcg_temp_free_i64(t64);
2278 #endif
2279 tcg_temp_free_i32(r_mop);
2280 tcg_temp_free_i32(r_asi);
2282 /* A write to a TLB register may alter page maps. End the TB. */
2283 dc->npc = DYNAMIC_PC;
2285 break;
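/* Forcing npc to DYNAMIC_PC here makes translation stop after this
 * instruction, so subsequent instructions are fetched and translated
 * under whatever mapping the ASI store just installed. */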
2289 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2290 TCGv addr, int insn)
2292 DisasASI da = get_asi(dc, insn, MO_TEUL);
2294 switch (da.type) {
2295 case GET_ASI_EXCP:
2296 break;
2297 default:
2299 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2300 TCGv_i32 r_mop = tcg_const_i32(MO_UL);
2301 TCGv_i64 s64, t64;
2303 save_state(dc);
2304 t64 = tcg_temp_new_i64();
2305 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2307 s64 = tcg_temp_new_i64();
2308 tcg_gen_extu_tl_i64(s64, src);
2309 gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
2310 tcg_temp_free_i64(s64);
2311 tcg_temp_free_i32(r_mop);
2312 tcg_temp_free_i32(r_asi);
2314 tcg_gen_trunc_i64_tl(dst, t64);
2315 tcg_temp_free_i64(t64);
2317 break;
2321 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv val2,
2322 int insn, int rd)
2324 DisasASI da = get_asi(dc, insn, MO_TEUL);
2325 TCGv val1, dst;
2326 TCGv_i32 r_asi;
2328 if (da.type == GET_ASI_EXCP) {
2329 return;
2332 save_state(dc);
2333 val1 = gen_load_gpr(dc, rd);
2334 dst = gen_dest_gpr(dc, rd);
2335 r_asi = tcg_const_i32(da.asi);
2336 gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
2337 tcg_temp_free_i32(r_asi);
2338 gen_store_gpr(dc, rd, dst);
2341 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2343 DisasASI da = get_asi(dc, insn, MO_UB);
2345 switch (da.type) {
2346 case GET_ASI_EXCP:
2347 break;
2348 default:
2350 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2351 TCGv_i32 r_mop = tcg_const_i32(MO_UB);
2352 TCGv_i64 s64, t64;
2354 save_state(dc);
2355 t64 = tcg_temp_new_i64();
2356 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2358 s64 = tcg_const_i64(0xff);
2359 gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
2360 tcg_temp_free_i64(s64);
2361 tcg_temp_free_i32(r_mop);
2362 tcg_temp_free_i32(r_asi);
2364 tcg_gen_trunc_i64_tl(dst, t64);
2365 tcg_temp_free_i64(t64);
2367 break;
2370 #endif
2372 #ifdef TARGET_SPARC64
2373 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2374 int insn, int size, int rd)
2376 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2377 TCGv_i32 d32;
2379 switch (da.type) {
2380 case GET_ASI_EXCP:
2381 break;
2383 case GET_ASI_DIRECT:
2384 gen_address_mask(dc, addr);
2385 switch (size) {
2386 case 4:
2387 d32 = gen_dest_fpr_F(dc);
2388 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
2389 gen_store_fpr_F(dc, rd, d32);
2390 break;
2391 case 8:
2392 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2393 break;
2394 case 16:
2395 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2396 tcg_gen_addi_tl(addr, addr, 8);
2397 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2398 break;
2399 default:
2400 g_assert_not_reached();
2402 break;
2404 case GET_ASI_BLOCK:
2405 /* Valid for lddfa on aligned registers only. */
2406 if (size == 8 && (rd & 7) == 0) {
2407 TCGMemOp memop;
2408 TCGv eight;
2409 int i;
2411 gen_address_mask(dc, addr);
2413 /* The first operation checks the required alignment. */
2414 memop = da.memop | MO_ALIGN_64;
2415 eight = tcg_const_tl(8);
2416 for (i = 0; ; ++i) {
2417 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2418 da.mem_idx, memop);
2419 if (i == 7) {
2420 break;
2422 tcg_gen_add_tl(addr, addr, eight);
2423 memop = da.memop;
2425 tcg_temp_free(eight);
2426 } else {
2427 gen_exception(dc, TT_ILL_INSN);
2429 break;
2431 case GET_ASI_SHORT:
2432 /* Valid for lddfa only. */
2433 if (size == 8) {
2434 gen_address_mask(dc, addr);
2435 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2436 } else {
2437 gen_exception(dc, TT_ILL_INSN);
2439 break;
2441 default:
2443 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2444 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2446 save_state(dc);
2447 /* According to the table in the UA2011 manual, the only
2448 other asis that are valid for ldfa/lddfa/ldqfa are
2449 the NO_FAULT asis. We still need a helper for these,
2450 but we can just use the integer asi helper for them. */
2451 switch (size) {
2452 case 4:
2454 TCGv d64 = tcg_temp_new_i64();
2455 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2456 d32 = gen_dest_fpr_F(dc);
2457 tcg_gen_extrl_i64_i32(d32, d64);
2458 tcg_temp_free_i64(d64);
2459 gen_store_fpr_F(dc, rd, d32);
2461 break;
2462 case 8:
2463 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2464 break;
2465 case 16:
2466 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2467 tcg_gen_addi_tl(addr, addr, 8);
2468 gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2469 break;
2470 default:
2471 g_assert_not_reached();
2473 tcg_temp_free_i32(r_mop);
2474 tcg_temp_free_i32(r_asi);
2476 break;
2480 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2481 int insn, int size, int rd)
2483 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2484 TCGv_i32 d32;
2486 switch (da.type) {
2487 case GET_ASI_EXCP:
2488 break;
2490 case GET_ASI_DIRECT:
2491 gen_address_mask(dc, addr);
2492 switch (size) {
2493 case 4:
2494 d32 = gen_load_fpr_F(dc, rd);
2495 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
2496 break;
2497 case 8:
2498 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2499 break;
2500 case 16:
2501 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2502 tcg_gen_addi_tl(addr, addr, 8);
2503 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2504 break;
2505 default:
2506 g_assert_not_reached();
2508 break;
2510 case GET_ASI_BLOCK:
2511 /* Valid for stdfa on aligned registers only. */
2512 if (size == 8 && (rd & 7) == 0) {
2513 TCGMemOp memop;
2514 TCGv eight;
2515 int i;
2517 gen_address_mask(dc, addr);
2519 /* The first operation checks the required alignment. */
2520 memop = da.memop | MO_ALIGN_64;
2521 eight = tcg_const_tl(8);
2522 for (i = 0; ; ++i) {
2523 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2524 da.mem_idx, memop);
2525 if (i == 7) {
2526 break;
2528 tcg_gen_add_tl(addr, addr, eight);
2529 memop = da.memop;
2531 tcg_temp_free(eight);
2532 } else {
2533 gen_exception(dc, TT_ILL_INSN);
2535 break;
2537 case GET_ASI_SHORT:
2538 /* Valid for stdfa only. */
2539 if (size == 8) {
2540 gen_address_mask(dc, addr);
2541 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2542 } else {
2543 gen_exception(dc, TT_ILL_INSN);
2545 break;
2547 default:
2548 /* According to the table in the UA2011 manual, the only
2549 other asis that are valid for stfa/stdfa/stqfa are
2550 the PST* asis, which aren't currently handled. */
2551 gen_exception(dc, TT_ILL_INSN);
2552 break;
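/* LDDA: the DIRECT case performs a single 64-bit load that is then split
into the even/odd register pair, while DTWINX loads two 64-bit words from
a 16-byte-aligned address. */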
2556 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2558 DisasASI da = get_asi(dc, insn, MO_TEQ);
2559 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2560 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2562 switch (da.type) {
2563 case GET_ASI_EXCP:
2564 return;
2566 case GET_ASI_DTWINX:
2567 gen_address_mask(dc, addr);
2568 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2569 tcg_gen_addi_tl(addr, addr, 8);
2570 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2571 break;
2573 case GET_ASI_DIRECT:
2575 TCGv_i64 tmp = tcg_temp_new_i64();
2577 gen_address_mask(dc, addr);
2578 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
2580 /* Note that LE ldda acts as if each 32-bit register
2581 result is byte swapped. Having just performed one
2582 64-bit bswap, we now need to swap the writebacks. */
2583 if ((da.memop & MO_BSWAP) == MO_TE) {
2584 tcg_gen_extr32_i64(lo, hi, tmp);
2585 } else {
2586 tcg_gen_extr32_i64(hi, lo, tmp);
2588 tcg_temp_free_i64(tmp);
2590 break;
2592 default:
2594 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2596 save_state(dc);
2597 gen_helper_ldda_asi(cpu_env, addr, r_asi);
2598 tcg_temp_free_i32(r_asi);
2600 tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUSPARCState, qt0.high));
2601 tcg_gen_ld_i64(lo, cpu_env, offsetof(CPUSPARCState, qt0.low));
2603 break;
2606 gen_store_gpr(dc, rd, hi);
2607 gen_store_gpr(dc, rd + 1, lo);
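/* STDA: the mirror image of gen_ldda_asi above; for the DIRECT case the
register pair is concatenated into a single 64-bit store, and DTWINX
stores two 64-bit words with 16-byte alignment. */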
2610 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2611 int insn, int rd)
2613 DisasASI da = get_asi(dc, insn, MO_TEQ);
2614 TCGv lo = gen_load_gpr(dc, rd + 1);
2616 switch (da.type) {
2617 case GET_ASI_EXCP:
2618 break;
2620 case GET_ASI_DTWINX:
2621 gen_address_mask(dc, addr);
2622 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2623 tcg_gen_addi_tl(addr, addr, 8);
2624 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2625 break;
2627 case GET_ASI_DIRECT:
2629 TCGv_i64 t64 = tcg_temp_new_i64();
2631 /* Note that LE stda acts as if each 32-bit register result is
2632 byte swapped. We will perform one 64-bit LE store, so now
2633 we must swap the order of the construction. */
2634 if ((da.memop & MO_BSWAP) == MO_TE) {
2635 tcg_gen_concat32_i64(t64, lo, hi);
2636 } else {
2637 tcg_gen_concat32_i64(t64, hi, lo);
2639 gen_address_mask(dc, addr);
2640 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2641 tcg_temp_free_i64(t64);
2643 break;
2645 default:
2647 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2648 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2649 TCGv_i64 t64;
2651 save_state(dc);
2653 t64 = tcg_temp_new_i64();
2654 tcg_gen_concat_tl_i64(t64, lo, hi);
2655 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2656 tcg_temp_free_i32(r_mop);
2657 tcg_temp_free_i32(r_asi);
2658 tcg_temp_free_i64(t64);
2660 break;
2664 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv val2,
2665 int insn, int rd)
2667 DisasASI da = get_asi(dc, insn, MO_TEQ);
2668 TCGv val1 = gen_load_gpr(dc, rd);
2669 TCGv dst = gen_dest_gpr(dc, rd);
2670 TCGv_i32 r_asi;
2672 if (da.type == GET_ASI_EXCP) {
2673 return;
2676 save_state(dc);
2677 r_asi = tcg_const_i32(da.asi);
2678 gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
2679 tcg_temp_free_i32(r_asi);
2680 gen_store_gpr(dc, rd, dst);
2683 #elif !defined(CONFIG_USER_ONLY)
2684 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2686 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2687 whereby "rd + 1" elicits "error: array subscript is above array".
2688 Since we have already asserted that rd is even, the semantics
2689 are unchanged. */
2690 TCGv lo = gen_dest_gpr(dc, rd | 1);
2691 TCGv hi = gen_dest_gpr(dc, rd);
2692 TCGv_i64 t64 = tcg_temp_new_i64();
2693 DisasASI da = get_asi(dc, insn, MO_TEQ);
2695 switch (da.type) {
2696 case GET_ASI_EXCP:
2697 tcg_temp_free_i64(t64);
2698 return;
2699 case GET_ASI_DIRECT:
2700 gen_address_mask(dc, addr);
2701 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
2702 break;
2703 default:
2705 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2706 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2708 save_state(dc);
2709 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2710 tcg_temp_free_i32(r_mop);
2711 tcg_temp_free_i32(r_asi);
2713 break;
2716 tcg_gen_extr_i64_i32(lo, hi, t64);
2717 tcg_temp_free_i64(t64);
2718 gen_store_gpr(dc, rd | 1, lo);
2719 gen_store_gpr(dc, rd, hi);
2722 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2723 int insn, int rd)
2725 DisasASI da = get_asi(dc, insn, MO_TEQ);
2726 TCGv lo = gen_load_gpr(dc, rd + 1);
2727 TCGv_i64 t64 = tcg_temp_new_i64();
2729 tcg_gen_concat_tl_i64(t64, lo, hi);
2731 switch (da.type) {
2732 case GET_ASI_EXCP:
2733 break;
2734 case GET_ASI_DIRECT:
2735 gen_address_mask(dc, addr);
2736 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2737 break;
2738 default:
2740 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2741 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2743 save_state(dc);
2744 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2745 tcg_temp_free_i32(r_mop);
2746 tcg_temp_free_i32(r_asi);
2748 break;
2751 tcg_temp_free_i64(t64);
2753 #endif
2755 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2757 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2758 return gen_load_gpr(dc, rs1);
2761 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2763 if (IS_IMM) { /* immediate */
2764 target_long simm = GET_FIELDs(insn, 19, 31);
2765 TCGv t = get_temp_tl(dc);
2766 tcg_gen_movi_tl(t, simm);
2767 return t;
2768 } else { /* register */
2769 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2770 return gen_load_gpr(dc, rs2);
2774 #ifdef TARGET_SPARC64
2775 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2777 TCGv_i32 c32, zero, dst, s1, s2;
2779 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2780 or fold the comparison down to 32 bits and use movcond_i32. Choose
2781 the latter. */
2782 c32 = tcg_temp_new_i32();
2783 if (cmp->is_bool) {
2784 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2785 } else {
2786 TCGv_i64 c64 = tcg_temp_new_i64();
2787 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2788 tcg_gen_extrl_i64_i32(c32, c64);
2789 tcg_temp_free_i64(c64);
2792 s1 = gen_load_fpr_F(dc, rs);
2793 s2 = gen_load_fpr_F(dc, rd);
2794 dst = gen_dest_fpr_F(dc);
2795 zero = tcg_const_i32(0);
2797 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2799 tcg_temp_free_i32(c32);
2800 tcg_temp_free_i32(zero);
2801 gen_store_fpr_F(dc, rd, dst);
2804 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2806 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2807 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2808 gen_load_fpr_D(dc, rs),
2809 gen_load_fpr_D(dc, rd));
2810 gen_store_fpr_D(dc, rd, dst);
2813 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2815 int qd = QFPREG(rd);
2816 int qs = QFPREG(rs);
2818 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2819 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2820 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2821 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2823 gen_update_fprs_dirty(dc, qd);
2826 #ifndef CONFIG_USER_ONLY
2827 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2829 TCGv_i32 r_tl = tcg_temp_new_i32();
2831 /* load env->tl into r_tl */
2832 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2834 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
2835 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2837 /* calculate offset to current trap state from env->ts, reuse r_tl */
2838 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2839 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2841 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2843 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2844 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2845 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2846 tcg_temp_free_ptr(r_tl_tmp);
2849 tcg_temp_free_i32(r_tl);
2851 #endif
2853 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2854 int width, bool cc, bool left)
2856 TCGv lo1, lo2, t1, t2;
2857 uint64_t amask, tabl, tabr;
2858 int shift, imask, omask;
2860 if (cc) {
2861 tcg_gen_mov_tl(cpu_cc_src, s1);
2862 tcg_gen_mov_tl(cpu_cc_src2, s2);
2863 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2864 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2865 dc->cc_op = CC_OP_SUB;
2868 /* Theory of operation: there are two tables, left and right (not to
2869 be confused with the left and right versions of the opcode). These
2870 are indexed by the low 3 bits of the inputs. To make things "easy",
2871 these tables are loaded into two constants, TABL and TABR below.
2872 The operation index = (input & imask) << shift calculates the index
2873 into the constant, while val = (table >> index) & omask calculates
2874 the value we're looking for. */
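/* Illustrative example (not in the original source): for edge8 "left"
with (s1 & 7) == 2, index = 2 << 3 = 16 and (TABL >> 16) & 0xff == 0xfc,
i.e. the mask selects bytes 2 through 7. */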
2875 switch (width) {
2876 case 8:
2877 imask = 0x7;
2878 shift = 3;
2879 omask = 0xff;
2880 if (left) {
2881 tabl = 0x80c0e0f0f8fcfeffULL;
2882 tabr = 0xff7f3f1f0f070301ULL;
2883 } else {
2884 tabl = 0x0103070f1f3f7fffULL;
2885 tabr = 0xfffefcf8f0e0c080ULL;
2887 break;
2888 case 16:
2889 imask = 0x6;
2890 shift = 1;
2891 omask = 0xf;
2892 if (left) {
2893 tabl = 0x8cef;
2894 tabr = 0xf731;
2895 } else {
2896 tabl = 0x137f;
2897 tabr = 0xfec8;
2899 break;
2900 case 32:
2901 imask = 0x4;
2902 shift = 0;
2903 omask = 0x3;
2904 if (left) {
2905 tabl = (2 << 2) | 3;
2906 tabr = (3 << 2) | 1;
2907 } else {
2908 tabl = (1 << 2) | 3;
2909 tabr = (3 << 2) | 2;
2911 break;
2912 default:
2913 abort();
2916 lo1 = tcg_temp_new();
2917 lo2 = tcg_temp_new();
2918 tcg_gen_andi_tl(lo1, s1, imask);
2919 tcg_gen_andi_tl(lo2, s2, imask);
2920 tcg_gen_shli_tl(lo1, lo1, shift);
2921 tcg_gen_shli_tl(lo2, lo2, shift);
2923 t1 = tcg_const_tl(tabl);
2924 t2 = tcg_const_tl(tabr);
2925 tcg_gen_shr_tl(lo1, t1, lo1);
2926 tcg_gen_shr_tl(lo2, t2, lo2);
2927 tcg_gen_andi_tl(dst, lo1, omask);
2928 tcg_gen_andi_tl(lo2, lo2, omask);
2930 amask = -8;
2931 if (AM_CHECK(dc)) {
2932 amask &= 0xffffffffULL;
2934 tcg_gen_andi_tl(s1, s1, amask);
2935 tcg_gen_andi_tl(s2, s2, amask);
2937 /* We want to compute
2938 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2939 We've already done dst = lo1, so this reduces to
2940 dst &= (s1 == s2 ? -1 : lo2)
2941 Which we perform by
2942 lo2 |= -(s1 == s2)
2943 dst &= lo2
2945 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2946 tcg_gen_neg_tl(t1, t1);
2947 tcg_gen_or_tl(lo2, lo2, t1);
2948 tcg_gen_and_tl(dst, dst, lo2);
2950 tcg_temp_free(lo1);
2951 tcg_temp_free(lo2);
2952 tcg_temp_free(t1);
2953 tcg_temp_free(t2);
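/* alignaddr: dst = (s1 + s2) & ~7, with GSR.align set to the low three
bits of the sum (negated first when `left' is set, i.e. the alignaddrl
form). */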
2956 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2958 TCGv tmp = tcg_temp_new();
2960 tcg_gen_add_tl(tmp, s1, s2);
2961 tcg_gen_andi_tl(dst, tmp, -8);
2962 if (left) {
2963 tcg_gen_neg_tl(tmp, tmp);
2965 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2967 tcg_temp_free(tmp);
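/* faligndata: extract 8 bytes starting at byte offset GSR.align from the
16-byte concatenation s1:s2. Illustrative example: with GSR.align == 3
this computes dst = (s1 << 24) | (s2 >> 40). */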
2970 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2972 TCGv t1, t2, shift;
2974 t1 = tcg_temp_new();
2975 t2 = tcg_temp_new();
2976 shift = tcg_temp_new();
2978 tcg_gen_andi_tl(shift, gsr, 7);
2979 tcg_gen_shli_tl(shift, shift, 3);
2980 tcg_gen_shl_tl(t1, s1, shift);
2982 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2983 shift of (up to 63) followed by a constant shift of 1. */
2984 tcg_gen_xori_tl(shift, shift, 63);
2985 tcg_gen_shr_tl(t2, s2, shift);
2986 tcg_gen_shri_tl(t2, t2, 1);
2988 tcg_gen_or_tl(dst, t1, t2);
2990 tcg_temp_free(t1);
2991 tcg_temp_free(t2);
2992 tcg_temp_free(shift);
2994 #endif
2996 #define CHECK_IU_FEATURE(dc, FEATURE) \
2997 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2998 goto illegal_insn;
2999 #define CHECK_FPU_FEATURE(dc, FEATURE) \
3000 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3001 goto nfpu_insn;
3003 /* before an instruction, dc->pc must be static */
3004 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
3006 unsigned int opc, rs1, rs2, rd;
3007 TCGv cpu_src1, cpu_src2;
3008 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
3009 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
3010 target_long simm;
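/* insn<31:30> selects the major format: 0 = branches/sethi, 1 = call,
2 = arithmetic/FPU, 3 = loads and stores. */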
3012 opc = GET_FIELD(insn, 0, 1);
3013 rd = GET_FIELD(insn, 2, 6);
3015 switch (opc) {
3016 case 0: /* branches/sethi */
3018 unsigned int xop = GET_FIELD(insn, 7, 9);
3019 int32_t target;
3020 switch (xop) {
3021 #ifdef TARGET_SPARC64
3022 case 0x1: /* V9 BPcc */
3024 int cc;
3026 target = GET_FIELD_SP(insn, 0, 18);
3027 target = sign_extend(target, 19);
3028 target <<= 2;
3029 cc = GET_FIELD_SP(insn, 20, 21);
3030 if (cc == 0)
3031 do_branch(dc, target, insn, 0);
3032 else if (cc == 2)
3033 do_branch(dc, target, insn, 1);
3034 else
3035 goto illegal_insn;
3036 goto jmp_insn;
3038 case 0x3: /* V9 BPr */
3040 target = GET_FIELD_SP(insn, 0, 13) |
3041 (GET_FIELD_SP(insn, 20, 21) << 14);
3042 target = sign_extend(target, 16);
3043 target <<= 2;
3044 cpu_src1 = get_src1(dc, insn);
3045 do_branch_reg(dc, target, insn, cpu_src1);
3046 goto jmp_insn;
3048 case 0x5: /* V9 FBPcc */
3050 int cc = GET_FIELD_SP(insn, 20, 21);
3051 if (gen_trap_ifnofpu(dc)) {
3052 goto jmp_insn;
3054 target = GET_FIELD_SP(insn, 0, 18);
3055 target = sign_extend(target, 19);
3056 target <<= 2;
3057 do_fbranch(dc, target, insn, cc);
3058 goto jmp_insn;
3060 #else
3061 case 0x7: /* CBN+x */
3063 goto ncp_insn;
3065 #endif
3066 case 0x2: /* BN+x */
3068 target = GET_FIELD(insn, 10, 31);
3069 target = sign_extend(target, 22);
3070 target <<= 2;
3071 do_branch(dc, target, insn, 0);
3072 goto jmp_insn;
3074 case 0x6: /* FBN+x */
3076 if (gen_trap_ifnofpu(dc)) {
3077 goto jmp_insn;
3079 target = GET_FIELD(insn, 10, 31);
3080 target = sign_extend(target, 22);
3081 target <<= 2;
3082 do_fbranch(dc, target, insn, 0);
3083 goto jmp_insn;
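/* SETHI writes imm22 << 10 into rd; e.g. (illustrative) imm22 = 0x48d15
yields 0x12345400. */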
3085 case 0x4: /* SETHI */
3086 /* Special-case %g0 because that's the canonical nop. */
3087 if (rd) {
3088 uint32_t value = GET_FIELD(insn, 10, 31);
3089 TCGv t = gen_dest_gpr(dc, rd);
3090 tcg_gen_movi_tl(t, value << 10);
3091 gen_store_gpr(dc, rd, t);
3093 break;
3094 case 0x0: /* UNIMPL */
3095 default:
3096 goto illegal_insn;
3098 break;
3100 break;
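/* CALL: PC-relative, word-aligned 30-bit displacement; the address of
the call instruction itself is written to %o7 (r15). */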
3101 case 1: /* CALL */
3103 target_long target = GET_FIELDs(insn, 2, 31) << 2;
3104 TCGv o7 = gen_dest_gpr(dc, 15);
3106 tcg_gen_movi_tl(o7, dc->pc);
3107 gen_store_gpr(dc, 15, o7);
3108 target += dc->pc;
3109 gen_mov_pc_npc(dc);
3110 #ifdef TARGET_SPARC64
3111 if (unlikely(AM_CHECK(dc))) {
3112 target &= 0xffffffffULL;
3114 #endif
3115 dc->npc = target;
3117 goto jmp_insn;
3118 case 2: /* FPU & Logical Operations */
3120 unsigned int xop = GET_FIELD(insn, 7, 12);
3121 TCGv cpu_dst = get_temp_tl(dc);
3122 TCGv cpu_tmp0;
3124 if (xop == 0x3a) { /* generate trap */
3125 int cond = GET_FIELD(insn, 3, 6);
3126 TCGv_i32 trap;
3127 TCGLabel *l1 = NULL;
3128 int mask;
3130 if (cond == 0) {
3131 /* Trap never. */
3132 break;
3135 save_state(dc);
3137 if (cond != 8) {
3138 /* Conditional trap. */
3139 DisasCompare cmp;
3140 #ifdef TARGET_SPARC64
3141 /* V9 icc/xcc */
3142 int cc = GET_FIELD_SP(insn, 11, 12);
3143 if (cc == 0) {
3144 gen_compare(&cmp, 0, cond, dc);
3145 } else if (cc == 2) {
3146 gen_compare(&cmp, 1, cond, dc);
3147 } else {
3148 goto illegal_insn;
3150 #else
3151 gen_compare(&cmp, 0, cond, dc);
3152 #endif
3153 l1 = gen_new_label();
3154 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3155 cmp.c1, cmp.c2, l1);
3156 free_compare(&cmp);
3159 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3160 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3162 /* Don't use the normal temporaries, as they may well have
3163 gone out of scope with the branch above. While we're
3164 doing that, we might as well pre-truncate to 32 bits. */
3165 trap = tcg_temp_new_i32();
3167 rs1 = GET_FIELD_SP(insn, 14, 18);
3168 if (IS_IMM) {
3169 rs2 = GET_FIELD_SP(insn, 0, 6);
3170 if (rs1 == 0) {
3171 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3172 /* Signal that the trap value is fully constant. */
3173 mask = 0;
3174 } else {
3175 TCGv t1 = gen_load_gpr(dc, rs1);
3176 tcg_gen_trunc_tl_i32(trap, t1);
3177 tcg_gen_addi_i32(trap, trap, rs2);
3179 } else {
3180 TCGv t1, t2;
3181 rs2 = GET_FIELD_SP(insn, 0, 4);
3182 t1 = gen_load_gpr(dc, rs1);
3183 t2 = gen_load_gpr(dc, rs2);
3184 tcg_gen_add_tl(t1, t1, t2);
3185 tcg_gen_trunc_tl_i32(trap, t1);
3187 if (mask != 0) {
3188 tcg_gen_andi_i32(trap, trap, mask);
3189 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3192 gen_helper_raise_exception(cpu_env, trap);
3193 tcg_temp_free_i32(trap);
3195 if (cond == 8) {
3196 /* An unconditional trap ends the TB. */
3197 dc->is_br = 1;
3198 goto jmp_insn;
3199 } else {
3200 /* A conditional trap falls through to the next insn. */
3201 gen_set_label(l1);
3202 break;
3204 } else if (xop == 0x28) {
3205 rs1 = GET_FIELD(insn, 13, 17);
3206 switch(rs1) {
3207 case 0: /* rdy */
3208 #ifndef TARGET_SPARC64
3209 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3210 manual, rdy on the microSPARC
3211 II */
3212 case 0x0f: /* stbar in the SPARCv8 manual,
3213 rdy on the microSPARC II */
3214 case 0x10 ... 0x1f: /* implementation-dependent in the
3215 SPARCv8 manual, rdy on the
3216 microSPARC II */
3217 /* Read Asr17 */
3218 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3219 TCGv t = gen_dest_gpr(dc, rd);
3220 /* Read Asr17 for a Leon3 monoprocessor */
3221 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3222 gen_store_gpr(dc, rd, t);
3223 break;
3225 #endif
3226 gen_store_gpr(dc, rd, cpu_y);
3227 break;
3228 #ifdef TARGET_SPARC64
3229 case 0x2: /* V9 rdccr */
3230 update_psr(dc);
3231 gen_helper_rdccr(cpu_dst, cpu_env);
3232 gen_store_gpr(dc, rd, cpu_dst);
3233 break;
3234 case 0x3: /* V9 rdasi */
3235 tcg_gen_movi_tl(cpu_dst, dc->asi);
3236 gen_store_gpr(dc, rd, cpu_dst);
3237 break;
3238 case 0x4: /* V9 rdtick */
3240 TCGv_ptr r_tickptr;
3241 TCGv_i32 r_const;
3243 r_tickptr = tcg_temp_new_ptr();
3244 r_const = tcg_const_i32(dc->mem_idx);
3245 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3246 offsetof(CPUSPARCState, tick));
3247 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3248 r_const);
3249 tcg_temp_free_ptr(r_tickptr);
3250 tcg_temp_free_i32(r_const);
3251 gen_store_gpr(dc, rd, cpu_dst);
3253 break;
3254 case 0x5: /* V9 rdpc */
3256 TCGv t = gen_dest_gpr(dc, rd);
3257 if (unlikely(AM_CHECK(dc))) {
3258 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3259 } else {
3260 tcg_gen_movi_tl(t, dc->pc);
3262 gen_store_gpr(dc, rd, t);
3264 break;
3265 case 0x6: /* V9 rdfprs */
3266 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3267 gen_store_gpr(dc, rd, cpu_dst);
3268 break;
3269 case 0xf: /* V9 membar */
3270 break; /* no effect */
3271 case 0x13: /* Graphics Status */
3272 if (gen_trap_ifnofpu(dc)) {
3273 goto jmp_insn;
3275 gen_store_gpr(dc, rd, cpu_gsr);
3276 break;
3277 case 0x16: /* Softint */
3278 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3279 offsetof(CPUSPARCState, softint));
3280 gen_store_gpr(dc, rd, cpu_dst);
3281 break;
3282 case 0x17: /* Tick compare */
3283 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3284 break;
3285 case 0x18: /* System tick */
3287 TCGv_ptr r_tickptr;
3288 TCGv_i32 r_const;
3290 r_tickptr = tcg_temp_new_ptr();
3291 r_const = tcg_const_i32(dc->mem_idx);
3292 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3293 offsetof(CPUSPARCState, stick));
3294 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3295 r_const);
3296 tcg_temp_free_ptr(r_tickptr);
3297 tcg_temp_free_i32(r_const);
3298 gen_store_gpr(dc, rd, cpu_dst);
3300 break;
3301 case 0x19: /* System tick compare */
3302 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3303 break;
3304 case 0x10: /* Performance Control */
3305 case 0x11: /* Performance Instrumentation Counter */
3306 case 0x12: /* Dispatch Control */
3307 case 0x14: /* Softint set, WO */
3308 case 0x15: /* Softint clear, WO */
3309 #endif
3310 default:
3311 goto illegal_insn;
3313 #if !defined(CONFIG_USER_ONLY)
3314 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3315 #ifndef TARGET_SPARC64
3316 if (!supervisor(dc)) {
3317 goto priv_insn;
3319 update_psr(dc);
3320 gen_helper_rdpsr(cpu_dst, cpu_env);
3321 #else
3322 CHECK_IU_FEATURE(dc, HYPV);
3323 if (!hypervisor(dc))
3324 goto priv_insn;
3325 rs1 = GET_FIELD(insn, 13, 17);
3326 switch (rs1) {
3327 case 0: // hpstate
3328 // gen_op_rdhpstate();
3329 break;
3330 case 1: // htstate
3331 // gen_op_rdhtstate();
3332 break;
3333 case 3: // hintp
3334 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3335 break;
3336 case 5: // htba
3337 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3338 break;
3339 case 6: // hver
3340 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3341 break;
3342 case 31: // hstick_cmpr
3343 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3344 break;
3345 default:
3346 goto illegal_insn;
3348 #endif
3349 gen_store_gpr(dc, rd, cpu_dst);
3350 break;
3351 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3352 if (!supervisor(dc)) {
3353 goto priv_insn;
3355 cpu_tmp0 = get_temp_tl(dc);
3356 #ifdef TARGET_SPARC64
3357 rs1 = GET_FIELD(insn, 13, 17);
3358 switch (rs1) {
3359 case 0: // tpc
3361 TCGv_ptr r_tsptr;
3363 r_tsptr = tcg_temp_new_ptr();
3364 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3365 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3366 offsetof(trap_state, tpc));
3367 tcg_temp_free_ptr(r_tsptr);
3369 break;
3370 case 1: // tnpc
3372 TCGv_ptr r_tsptr;
3374 r_tsptr = tcg_temp_new_ptr();
3375 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3376 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3377 offsetof(trap_state, tnpc));
3378 tcg_temp_free_ptr(r_tsptr);
3380 break;
3381 case 2: // tstate
3383 TCGv_ptr r_tsptr;
3385 r_tsptr = tcg_temp_new_ptr();
3386 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3387 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3388 offsetof(trap_state, tstate));
3389 tcg_temp_free_ptr(r_tsptr);
3391 break;
3392 case 3: // tt
3394 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3396 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3397 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3398 offsetof(trap_state, tt));
3399 tcg_temp_free_ptr(r_tsptr);
3401 break;
3402 case 4: // tick
3404 TCGv_ptr r_tickptr;
3405 TCGv_i32 r_const;
3407 r_tickptr = tcg_temp_new_ptr();
3408 r_const = tcg_const_i32(dc->mem_idx);
3409 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3410 offsetof(CPUSPARCState, tick));
3411 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3412 r_tickptr, r_const);
3413 tcg_temp_free_ptr(r_tickptr);
3414 tcg_temp_free_i32(r_const);
3416 break;
3417 case 5: // tba
3418 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3419 break;
3420 case 6: // pstate
3421 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3422 offsetof(CPUSPARCState, pstate));
3423 break;
3424 case 7: // tl
3425 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3426 offsetof(CPUSPARCState, tl));
3427 break;
3428 case 8: // pil
3429 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3430 offsetof(CPUSPARCState, psrpil));
3431 break;
3432 case 9: // cwp
3433 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3434 break;
3435 case 10: // cansave
3436 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3437 offsetof(CPUSPARCState, cansave));
3438 break;
3439 case 11: // canrestore
3440 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3441 offsetof(CPUSPARCState, canrestore));
3442 break;
3443 case 12: // cleanwin
3444 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3445 offsetof(CPUSPARCState, cleanwin));
3446 break;
3447 case 13: // otherwin
3448 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3449 offsetof(CPUSPARCState, otherwin));
3450 break;
3451 case 14: // wstate
3452 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3453 offsetof(CPUSPARCState, wstate));
3454 break;
3455 case 16: // UA2005 gl
3456 CHECK_IU_FEATURE(dc, GL);
3457 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3458 offsetof(CPUSPARCState, gl));
3459 break;
3460 case 26: // UA2005 strand status
3461 CHECK_IU_FEATURE(dc, HYPV);
3462 if (!hypervisor(dc))
3463 goto priv_insn;
3464 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3465 break;
3466 case 31: // ver
3467 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3468 break;
3469 case 15: // fq
3470 default:
3471 goto illegal_insn;
3473 #else
3474 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3475 #endif
3476 gen_store_gpr(dc, rd, cpu_tmp0);
3477 break;
3478 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3479 #ifdef TARGET_SPARC64
3480 gen_helper_flushw(cpu_env);
3481 #else
3482 if (!supervisor(dc))
3483 goto priv_insn;
3484 gen_store_gpr(dc, rd, cpu_tbr);
3485 #endif
3486 break;
3487 #endif
3488 } else if (xop == 0x34) { /* FPU Operations */
3489 if (gen_trap_ifnofpu(dc)) {
3490 goto jmp_insn;
3492 gen_op_clear_ieee_excp_and_FTT();
3493 rs1 = GET_FIELD(insn, 13, 17);
3494 rs2 = GET_FIELD(insn, 27, 31);
3495 xop = GET_FIELD(insn, 18, 26);
3497 switch (xop) {
3498 case 0x1: /* fmovs */
3499 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3500 gen_store_fpr_F(dc, rd, cpu_src1_32);
3501 break;
3502 case 0x5: /* fnegs */
3503 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3504 break;
3505 case 0x9: /* fabss */
3506 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3507 break;
3508 case 0x29: /* fsqrts */
3509 CHECK_FPU_FEATURE(dc, FSQRT);
3510 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3511 break;
3512 case 0x2a: /* fsqrtd */
3513 CHECK_FPU_FEATURE(dc, FSQRT);
3514 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3515 break;
3516 case 0x2b: /* fsqrtq */
3517 CHECK_FPU_FEATURE(dc, FLOAT128);
3518 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3519 break;
3520 case 0x41: /* fadds */
3521 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3522 break;
3523 case 0x42: /* faddd */
3524 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3525 break;
3526 case 0x43: /* faddq */
3527 CHECK_FPU_FEATURE(dc, FLOAT128);
3528 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3529 break;
3530 case 0x45: /* fsubs */
3531 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3532 break;
3533 case 0x46: /* fsubd */
3534 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3535 break;
3536 case 0x47: /* fsubq */
3537 CHECK_FPU_FEATURE(dc, FLOAT128);
3538 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3539 break;
3540 case 0x49: /* fmuls */
3541 CHECK_FPU_FEATURE(dc, FMUL);
3542 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3543 break;
3544 case 0x4a: /* fmuld */
3545 CHECK_FPU_FEATURE(dc, FMUL);
3546 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3547 break;
3548 case 0x4b: /* fmulq */
3549 CHECK_FPU_FEATURE(dc, FLOAT128);
3550 CHECK_FPU_FEATURE(dc, FMUL);
3551 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3552 break;
3553 case 0x4d: /* fdivs */
3554 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3555 break;
3556 case 0x4e: /* fdivd */
3557 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3558 break;
3559 case 0x4f: /* fdivq */
3560 CHECK_FPU_FEATURE(dc, FLOAT128);
3561 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3562 break;
3563 case 0x69: /* fsmuld */
3564 CHECK_FPU_FEATURE(dc, FSMULD);
3565 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3566 break;
3567 case 0x6e: /* fdmulq */
3568 CHECK_FPU_FEATURE(dc, FLOAT128);
3569 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3570 break;
3571 case 0xc4: /* fitos */
3572 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3573 break;
3574 case 0xc6: /* fdtos */
3575 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3576 break;
3577 case 0xc7: /* fqtos */
3578 CHECK_FPU_FEATURE(dc, FLOAT128);
3579 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3580 break;
3581 case 0xc8: /* fitod */
3582 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3583 break;
3584 case 0xc9: /* fstod */
3585 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3586 break;
3587 case 0xcb: /* fqtod */
3588 CHECK_FPU_FEATURE(dc, FLOAT128);
3589 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3590 break;
3591 case 0xcc: /* fitoq */
3592 CHECK_FPU_FEATURE(dc, FLOAT128);
3593 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3594 break;
3595 case 0xcd: /* fstoq */
3596 CHECK_FPU_FEATURE(dc, FLOAT128);
3597 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3598 break;
3599 case 0xce: /* fdtoq */
3600 CHECK_FPU_FEATURE(dc, FLOAT128);
3601 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3602 break;
3603 case 0xd1: /* fstoi */
3604 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3605 break;
3606 case 0xd2: /* fdtoi */
3607 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3608 break;
3609 case 0xd3: /* fqtoi */
3610 CHECK_FPU_FEATURE(dc, FLOAT128);
3611 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3612 break;
3613 #ifdef TARGET_SPARC64
3614 case 0x2: /* V9 fmovd */
3615 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3616 gen_store_fpr_D(dc, rd, cpu_src1_64);
3617 break;
3618 case 0x3: /* V9 fmovq */
3619 CHECK_FPU_FEATURE(dc, FLOAT128);
3620 gen_move_Q(dc, rd, rs2);
3621 break;
3622 case 0x6: /* V9 fnegd */
3623 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3624 break;
3625 case 0x7: /* V9 fnegq */
3626 CHECK_FPU_FEATURE(dc, FLOAT128);
3627 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3628 break;
3629 case 0xa: /* V9 fabsd */
3630 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3631 break;
3632 case 0xb: /* V9 fabsq */
3633 CHECK_FPU_FEATURE(dc, FLOAT128);
3634 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3635 break;
3636 case 0x81: /* V9 fstox */
3637 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3638 break;
3639 case 0x82: /* V9 fdtox */
3640 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3641 break;
3642 case 0x83: /* V9 fqtox */
3643 CHECK_FPU_FEATURE(dc, FLOAT128);
3644 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3645 break;
3646 case 0x84: /* V9 fxtos */
3647 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3648 break;
3649 case 0x88: /* V9 fxtod */
3650 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3651 break;
3652 case 0x8c: /* V9 fxtoq */
3653 CHECK_FPU_FEATURE(dc, FLOAT128);
3654 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3655 break;
3656 #endif
3657 default:
3658 goto illegal_insn;
3660 } else if (xop == 0x35) { /* FPU Operations */
3661 #ifdef TARGET_SPARC64
3662 int cond;
3663 #endif
3664 if (gen_trap_ifnofpu(dc)) {
3665 goto jmp_insn;
3667 gen_op_clear_ieee_excp_and_FTT();
3668 rs1 = GET_FIELD(insn, 13, 17);
3669 rs2 = GET_FIELD(insn, 27, 31);
3670 xop = GET_FIELD(insn, 18, 26);
3672 #ifdef TARGET_SPARC64
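/* FMOVR: conditionally move an FP register when integer register rs1
satisfies cond against zero; gen_compare_reg builds the comparison
consumed by gen_fmov{s,d,q}. */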
3673 #define FMOVR(sz) \
3674 do { \
3675 DisasCompare cmp; \
3676 cond = GET_FIELD_SP(insn, 10, 12); \
3677 cpu_src1 = get_src1(dc, insn); \
3678 gen_compare_reg(&cmp, cond, cpu_src1); \
3679 gen_fmov##sz(dc, &cmp, rd, rs2); \
3680 free_compare(&cmp); \
3681 } while (0)
3683 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3684 FMOVR(s);
3685 break;
3686 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3687 FMOVR(d);
3688 break;
3689 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3690 CHECK_FPU_FEATURE(dc, FLOAT128);
3691 FMOVR(q);
3692 break;
3694 #undef FMOVR
3695 #endif
3696 switch (xop) {
3697 #ifdef TARGET_SPARC64
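/* First FMOVCC form: conditional FP move on a floating-point condition
code field %fccN; the redefinition below handles %icc/%xcc. */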
3698 #define FMOVCC(fcc, sz) \
3699 do { \
3700 DisasCompare cmp; \
3701 cond = GET_FIELD_SP(insn, 14, 17); \
3702 gen_fcompare(&cmp, fcc, cond); \
3703 gen_fmov##sz(dc, &cmp, rd, rs2); \
3704 free_compare(&cmp); \
3705 } while (0)
3707 case 0x001: /* V9 fmovscc %fcc0 */
3708 FMOVCC(0, s);
3709 break;
3710 case 0x002: /* V9 fmovdcc %fcc0 */
3711 FMOVCC(0, d);
3712 break;
3713 case 0x003: /* V9 fmovqcc %fcc0 */
3714 CHECK_FPU_FEATURE(dc, FLOAT128);
3715 FMOVCC(0, q);
3716 break;
3717 case 0x041: /* V9 fmovscc %fcc1 */
3718 FMOVCC(1, s);
3719 break;
3720 case 0x042: /* V9 fmovdcc %fcc1 */
3721 FMOVCC(1, d);
3722 break;
3723 case 0x043: /* V9 fmovqcc %fcc1 */
3724 CHECK_FPU_FEATURE(dc, FLOAT128);
3725 FMOVCC(1, q);
3726 break;
3727 case 0x081: /* V9 fmovscc %fcc2 */
3728 FMOVCC(2, s);
3729 break;
3730 case 0x082: /* V9 fmovdcc %fcc2 */
3731 FMOVCC(2, d);
3732 break;
3733 case 0x083: /* V9 fmovqcc %fcc2 */
3734 CHECK_FPU_FEATURE(dc, FLOAT128);
3735 FMOVCC(2, q);
3736 break;
3737 case 0x0c1: /* V9 fmovscc %fcc3 */
3738 FMOVCC(3, s);
3739 break;
3740 case 0x0c2: /* V9 fmovdcc %fcc3 */
3741 FMOVCC(3, d);
3742 break;
3743 case 0x0c3: /* V9 fmovqcc %fcc3 */
3744 CHECK_FPU_FEATURE(dc, FLOAT128);
3745 FMOVCC(3, q);
3746 break;
3747 #undef FMOVCC
3748 #define FMOVCC(xcc, sz) \
3749 do { \
3750 DisasCompare cmp; \
3751 cond = GET_FIELD_SP(insn, 14, 17); \
3752 gen_compare(&cmp, xcc, cond, dc); \
3753 gen_fmov##sz(dc, &cmp, rd, rs2); \
3754 free_compare(&cmp); \
3755 } while (0)
3757 case 0x101: /* V9 fmovscc %icc */
3758 FMOVCC(0, s);
3759 break;
3760 case 0x102: /* V9 fmovdcc %icc */
3761 FMOVCC(0, d);
3762 break;
3763 case 0x103: /* V9 fmovqcc %icc */
3764 CHECK_FPU_FEATURE(dc, FLOAT128);
3765 FMOVCC(0, q);
3766 break;
3767 case 0x181: /* V9 fmovscc %xcc */
3768 FMOVCC(1, s);
3769 break;
3770 case 0x182: /* V9 fmovdcc %xcc */
3771 FMOVCC(1, d);
3772 break;
3773 case 0x183: /* V9 fmovqcc %xcc */
3774 CHECK_FPU_FEATURE(dc, FLOAT128);
3775 FMOVCC(1, q);
3776 break;
3777 #undef FMOVCC
3778 #endif
3779 case 0x51: /* fcmps, V9 %fcc */
3780 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3781 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3782 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3783 break;
3784 case 0x52: /* fcmpd, V9 %fcc */
3785 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3786 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3787 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3788 break;
3789 case 0x53: /* fcmpq, V9 %fcc */
3790 CHECK_FPU_FEATURE(dc, FLOAT128);
3791 gen_op_load_fpr_QT0(QFPREG(rs1));
3792 gen_op_load_fpr_QT1(QFPREG(rs2));
3793 gen_op_fcmpq(rd & 3);
3794 break;
3795 case 0x55: /* fcmpes, V9 %fcc */
3796 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3797 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3798 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3799 break;
3800 case 0x56: /* fcmped, V9 %fcc */
3801 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3802 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3803 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3804 break;
3805 case 0x57: /* fcmpeq, V9 %fcc */
3806 CHECK_FPU_FEATURE(dc, FLOAT128);
3807 gen_op_load_fpr_QT0(QFPREG(rs1));
3808 gen_op_load_fpr_QT1(QFPREG(rs2));
3809 gen_op_fcmpeq(rd & 3);
3810 break;
3811 default:
3812 goto illegal_insn;
3814 } else if (xop == 0x2) {
3815 TCGv dst = gen_dest_gpr(dc, rd);
3816 rs1 = GET_FIELD(insn, 13, 17);
3817 if (rs1 == 0) {
3818 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3819 if (IS_IMM) { /* immediate */
3820 simm = GET_FIELDs(insn, 19, 31);
3821 tcg_gen_movi_tl(dst, simm);
3822 gen_store_gpr(dc, rd, dst);
3823 } else { /* register */
3824 rs2 = GET_FIELD(insn, 27, 31);
3825 if (rs2 == 0) {
3826 tcg_gen_movi_tl(dst, 0);
3827 gen_store_gpr(dc, rd, dst);
3828 } else {
3829 cpu_src2 = gen_load_gpr(dc, rs2);
3830 gen_store_gpr(dc, rd, cpu_src2);
3833 } else {
3834 cpu_src1 = get_src1(dc, insn);
3835 if (IS_IMM) { /* immediate */
3836 simm = GET_FIELDs(insn, 19, 31);
3837 tcg_gen_ori_tl(dst, cpu_src1, simm);
3838 gen_store_gpr(dc, rd, dst);
3839 } else { /* register */
3840 rs2 = GET_FIELD(insn, 27, 31);
3841 if (rs2 == 0) {
3842 /* mov shortcut: or x, %g0, y -> mov x, y */
3843 gen_store_gpr(dc, rd, cpu_src1);
3844 } else {
3845 cpu_src2 = gen_load_gpr(dc, rs2);
3846 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3847 gen_store_gpr(dc, rd, dst);
3851 #ifdef TARGET_SPARC64
3852 } else if (xop == 0x25) { /* sll, V9 sllx */
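/* For sll/srl/sra on V9, bit 12 of the insn selects the 64-bit (x) form
with a 6-bit shift count; otherwise only 5 bits of the count are used
and srl/sra first truncate or sign-extend the source to 32 bits. */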
3853 cpu_src1 = get_src1(dc, insn);
3854 if (IS_IMM) { /* immediate */
3855 simm = GET_FIELDs(insn, 20, 31);
3856 if (insn & (1 << 12)) {
3857 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3858 } else {
3859 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3861 } else { /* register */
3862 rs2 = GET_FIELD(insn, 27, 31);
3863 cpu_src2 = gen_load_gpr(dc, rs2);
3864 cpu_tmp0 = get_temp_tl(dc);
3865 if (insn & (1 << 12)) {
3866 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3867 } else {
3868 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3870 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3872 gen_store_gpr(dc, rd, cpu_dst);
3873 } else if (xop == 0x26) { /* srl, V9 srlx */
3874 cpu_src1 = get_src1(dc, insn);
3875 if (IS_IMM) { /* immediate */
3876 simm = GET_FIELDs(insn, 20, 31);
3877 if (insn & (1 << 12)) {
3878 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3879 } else {
3880 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3881 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3883 } else { /* register */
3884 rs2 = GET_FIELD(insn, 27, 31);
3885 cpu_src2 = gen_load_gpr(dc, rs2);
3886 cpu_tmp0 = get_temp_tl(dc);
3887 if (insn & (1 << 12)) {
3888 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3889 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3890 } else {
3891 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3892 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3893 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3896 gen_store_gpr(dc, rd, cpu_dst);
3897 } else if (xop == 0x27) { /* sra, V9 srax */
3898 cpu_src1 = get_src1(dc, insn);
3899 if (IS_IMM) { /* immediate */
3900 simm = GET_FIELDs(insn, 20, 31);
3901 if (insn & (1 << 12)) {
3902 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3903 } else {
3904 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3905 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3907 } else { /* register */
3908 rs2 = GET_FIELD(insn, 27, 31);
3909 cpu_src2 = gen_load_gpr(dc, rs2);
3910 cpu_tmp0 = get_temp_tl(dc);
3911 if (insn & (1 << 12)) {
3912 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3913 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3914 } else {
3915 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3916 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3917 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3920 gen_store_gpr(dc, rd, cpu_dst);
3921 #endif
3922 } else if (xop < 0x36) {
3923 if (xop < 0x20) {
3924 cpu_src1 = get_src1(dc, insn);
3925 cpu_src2 = get_src2(dc, insn);
3926 switch (xop & ~0x10) {
3927 case 0x0: /* add */
3928 if (xop & 0x10) {
3929 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3930 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3931 dc->cc_op = CC_OP_ADD;
3932 } else {
3933 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3935 break;
3936 case 0x1: /* and */
3937 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3938 if (xop & 0x10) {
3939 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3940 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3941 dc->cc_op = CC_OP_LOGIC;
3943 break;
3944 case 0x2: /* or */
3945 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3946 if (xop & 0x10) {
3947 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3948 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3949 dc->cc_op = CC_OP_LOGIC;
3951 break;
3952 case 0x3: /* xor */
3953 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3954 if (xop & 0x10) {
3955 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3956 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3957 dc->cc_op = CC_OP_LOGIC;
3959 break;
3960 case 0x4: /* sub */
3961 if (xop & 0x10) {
3962 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3963 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3964 dc->cc_op = CC_OP_SUB;
3965 } else {
3966 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3968 break;
3969 case 0x5: /* andn */
3970 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3971 if (xop & 0x10) {
3972 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3973 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3974 dc->cc_op = CC_OP_LOGIC;
3976 break;
3977 case 0x6: /* orn */
3978 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3979 if (xop & 0x10) {
3980 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3981 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3982 dc->cc_op = CC_OP_LOGIC;
3984 break;
3985 case 0x7: /* xorn */
3986 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3987 if (xop & 0x10) {
3988 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3989 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3990 dc->cc_op = CC_OP_LOGIC;
3992 break;
3993 case 0x8: /* addx, V9 addc */
3994 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3995 (xop & 0x10));
3996 break;
3997 #ifdef TARGET_SPARC64
3998 case 0x9: /* V9 mulx */
3999 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4000 break;
4001 #endif
4002 case 0xa: /* umul */
4003 CHECK_IU_FEATURE(dc, MUL);
4004 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4005 if (xop & 0x10) {
4006 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4007 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4008 dc->cc_op = CC_OP_LOGIC;
4010 break;
4011 case 0xb: /* smul */
4012 CHECK_IU_FEATURE(dc, MUL);
4013 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4014 if (xop & 0x10) {
4015 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4016 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4017 dc->cc_op = CC_OP_LOGIC;
4019 break;
4020 case 0xc: /* subx, V9 subc */
4021 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4022 (xop & 0x10));
4023 break;
4024 #ifdef TARGET_SPARC64
4025 case 0xd: /* V9 udivx */
4026 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4027 break;
4028 #endif
4029 case 0xe: /* udiv */
4030 CHECK_IU_FEATURE(dc, DIV);
4031 if (xop & 0x10) {
4032 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4033 cpu_src2);
4034 dc->cc_op = CC_OP_DIV;
4035 } else {
4036 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4037 cpu_src2);
4039 break;
4040 case 0xf: /* sdiv */
4041 CHECK_IU_FEATURE(dc, DIV);
4042 if (xop & 0x10) {
4043 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4044 cpu_src2);
4045 dc->cc_op = CC_OP_DIV;
4046 } else {
4047 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4048 cpu_src2);
4050 break;
4051 default:
4052 goto illegal_insn;
4054 gen_store_gpr(dc, rd, cpu_dst);
4055 } else {
4056 cpu_src1 = get_src1(dc, insn);
4057 cpu_src2 = get_src2(dc, insn);
4058 switch (xop) {
4059 case 0x20: /* taddcc */
4060 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4061 gen_store_gpr(dc, rd, cpu_dst);
4062 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4063 dc->cc_op = CC_OP_TADD;
4064 break;
4065 case 0x21: /* tsubcc */
4066 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4067 gen_store_gpr(dc, rd, cpu_dst);
4068 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4069 dc->cc_op = CC_OP_TSUB;
4070 break;
4071 case 0x22: /* taddcctv */
4072 gen_helper_taddcctv(cpu_dst, cpu_env,
4073 cpu_src1, cpu_src2);
4074 gen_store_gpr(dc, rd, cpu_dst);
4075 dc->cc_op = CC_OP_TADDTV;
4076 break;
4077 case 0x23: /* tsubcctv */
4078 gen_helper_tsubcctv(cpu_dst, cpu_env,
4079 cpu_src1, cpu_src2);
4080 gen_store_gpr(dc, rd, cpu_dst);
4081 dc->cc_op = CC_OP_TSUBTV;
4082 break;
4083 case 0x24: /* mulscc */
4084 update_psr(dc);
4085 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4086 gen_store_gpr(dc, rd, cpu_dst);
4087 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4088 dc->cc_op = CC_OP_ADD;
4089 break;
4090 #ifndef TARGET_SPARC64
4091 case 0x25: /* sll */
4092 if (IS_IMM) { /* immediate */
4093 simm = GET_FIELDs(insn, 20, 31);
4094 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4095 } else { /* register */
4096 cpu_tmp0 = get_temp_tl(dc);
4097 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4098 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4100 gen_store_gpr(dc, rd, cpu_dst);
4101 break;
4102 case 0x26: /* srl */
4103 if (IS_IMM) { /* immediate */
4104 simm = GET_FIELDs(insn, 20, 31);
4105 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4106 } else { /* register */
4107 cpu_tmp0 = get_temp_tl(dc);
4108 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4109 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4111 gen_store_gpr(dc, rd, cpu_dst);
4112 break;
4113 case 0x27: /* sra */
4114 if (IS_IMM) { /* immediate */
4115 simm = GET_FIELDs(insn, 20, 31);
4116 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4117 } else { /* register */
4118 cpu_tmp0 = get_temp_tl(dc);
4119 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4120 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4122 gen_store_gpr(dc, rd, cpu_dst);
4123 break;
4124 #endif
4125 case 0x30:
4127 cpu_tmp0 = get_temp_tl(dc);
4128 switch(rd) {
4129 case 0: /* wry */
4130 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4131 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4132 break;
4133 #ifndef TARGET_SPARC64
4134 case 0x01 ... 0x0f: /* undefined in the
4135 SPARCv8 manual, nop
4136 on the microSPARC
4137 II */
4138 case 0x10 ... 0x1f: /* implementation-dependent
4139 in the SPARCv8
4140 manual, nop on the
4141 microSPARC II */
4142 if ((rd == 0x13) && (dc->def->features &
4143 CPU_FEATURE_POWERDOWN)) {
4144 /* LEON3 power-down */
4145 save_state(dc);
4146 gen_helper_power_down(cpu_env);
4148 break;
4149 #else
4150 case 0x2: /* V9 wrccr */
4151 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4152 gen_helper_wrccr(cpu_env, cpu_tmp0);
4153 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4154 dc->cc_op = CC_OP_FLAGS;
4155 break;
4156 case 0x3: /* V9 wrasi */
4157 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4158 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4159 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4160 offsetof(CPUSPARCState, asi));
4161 /* End TB to notice changed ASI. */
4162 save_state(dc);
4163 gen_op_next_insn();
4164 tcg_gen_exit_tb(0);
4165 dc->is_br = 1;
4166 break;
4167 case 0x6: /* V9 wrfprs */
4168 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4169 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4170 dc->fprs_dirty = 0;
4171 save_state(dc);
4172 gen_op_next_insn();
4173 tcg_gen_exit_tb(0);
4174 dc->is_br = 1;
4175 break;
4176 case 0xf: /* V9 sir, nop if user */
4177 #if !defined(CONFIG_USER_ONLY)
4178 if (supervisor(dc)) {
4179 ; // XXX
4181 #endif
4182 break;
4183 case 0x13: /* Graphics Status */
4184 if (gen_trap_ifnofpu(dc)) {
4185 goto jmp_insn;
4187 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4188 break;
4189 case 0x14: /* Softint set */
4190 if (!supervisor(dc))
4191 goto illegal_insn;
4192 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4193 gen_helper_set_softint(cpu_env, cpu_tmp0);
4194 break;
4195 case 0x15: /* Softint clear */
4196 if (!supervisor(dc))
4197 goto illegal_insn;
4198 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4199 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4200 break;
4201 case 0x16: /* Softint write */
4202 if (!supervisor(dc))
4203 goto illegal_insn;
4204 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4205 gen_helper_write_softint(cpu_env, cpu_tmp0);
4206 break;
4207 case 0x17: /* Tick compare */
4208 #if !defined(CONFIG_USER_ONLY)
4209 if (!supervisor(dc))
4210 goto illegal_insn;
4211 #endif
4213 TCGv_ptr r_tickptr;
4215 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4216 cpu_src2);
4217 r_tickptr = tcg_temp_new_ptr();
4218 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4219 offsetof(CPUSPARCState, tick));
4220 gen_helper_tick_set_limit(r_tickptr,
4221 cpu_tick_cmpr);
4222 tcg_temp_free_ptr(r_tickptr);
4224 break;
4225 case 0x18: /* System tick */
4226 #if !defined(CONFIG_USER_ONLY)
4227 if (!supervisor(dc))
4228 goto illegal_insn;
4229 #endif
4231 TCGv_ptr r_tickptr;
4233 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4234 cpu_src2);
4235 r_tickptr = tcg_temp_new_ptr();
4236 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4237 offsetof(CPUSPARCState, stick));
4238 gen_helper_tick_set_count(r_tickptr,
4239 cpu_tmp0);
4240 tcg_temp_free_ptr(r_tickptr);
4242 break;
4243 case 0x19: /* System tick compare */
4244 #if !defined(CONFIG_USER_ONLY)
4245 if (!supervisor(dc))
4246 goto illegal_insn;
4247 #endif
4249 TCGv_ptr r_tickptr;
4251 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4252 cpu_src2);
4253 r_tickptr = tcg_temp_new_ptr();
4254 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4255 offsetof(CPUSPARCState, stick));
4256 gen_helper_tick_set_limit(r_tickptr,
4257 cpu_stick_cmpr);
4258 tcg_temp_free_ptr(r_tickptr);
4260 break;
4262 case 0x10: /* Performance Control */
4263 case 0x11: /* Performance Instrumentation
4264 Counter */
4265 case 0x12: /* Dispatch Control */
4266 #endif
4267 default:
4268 goto illegal_insn;
4271 break;
4272 #if !defined(CONFIG_USER_ONLY)
4273 case 0x31: /* wrpsr, V9 saved, restored */
4275 if (!supervisor(dc))
4276 goto priv_insn;
4277 #ifdef TARGET_SPARC64
4278 switch (rd) {
4279 case 0:
4280 gen_helper_saved(cpu_env);
4281 break;
4282 case 1:
4283 gen_helper_restored(cpu_env);
4284 break;
4285 case 2: /* UA2005 allclean */
4286 case 3: /* UA2005 otherw */
4287 case 4: /* UA2005 normalw */
4288 case 5: /* UA2005 invalw */
4289 // XXX
4290 default:
4291 goto illegal_insn;
4293 #else
4294 cpu_tmp0 = get_temp_tl(dc);
4295 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4296 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4297 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4298 dc->cc_op = CC_OP_FLAGS;
4299 save_state(dc);
4300 gen_op_next_insn();
4301 tcg_gen_exit_tb(0);
4302 dc->is_br = 1;
4303 #endif
4305 break;
4306 case 0x32: /* wrwim, V9 wrpr */
4308 if (!supervisor(dc))
4309 goto priv_insn;
4310 cpu_tmp0 = get_temp_tl(dc);
4311 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4312 #ifdef TARGET_SPARC64
4313 switch (rd) {
4314 case 0: // tpc
4316 TCGv_ptr r_tsptr;
4318 r_tsptr = tcg_temp_new_ptr();
4319 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4320 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4321 offsetof(trap_state, tpc));
4322 tcg_temp_free_ptr(r_tsptr);
4324 break;
4325 case 1: // tnpc
4327 TCGv_ptr r_tsptr;
4329 r_tsptr = tcg_temp_new_ptr();
4330 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4331 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4332 offsetof(trap_state, tnpc));
4333 tcg_temp_free_ptr(r_tsptr);
4335 break;
4336 case 2: // tstate
4338 TCGv_ptr r_tsptr;
4340 r_tsptr = tcg_temp_new_ptr();
4341 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4342 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4343 offsetof(trap_state,
4344 tstate));
4345 tcg_temp_free_ptr(r_tsptr);
4347 break;
4348 case 3: // tt
4350 TCGv_ptr r_tsptr;
4352 r_tsptr = tcg_temp_new_ptr();
4353 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4354 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4355 offsetof(trap_state, tt));
4356 tcg_temp_free_ptr(r_tsptr);
4358 break;
4359 case 4: // tick
4361 TCGv_ptr r_tickptr;
4363 r_tickptr = tcg_temp_new_ptr();
4364 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4365 offsetof(CPUSPARCState, tick));
4366 gen_helper_tick_set_count(r_tickptr,
4367 cpu_tmp0);
4368 tcg_temp_free_ptr(r_tickptr);
4370 break;
4371 case 5: // tba
4372 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4373 break;
4374 case 6: // pstate
4375 save_state(dc);
4376 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4377 dc->npc = DYNAMIC_PC;
4378 break;
4379 case 7: // tl
4380 save_state(dc);
4381 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4382 offsetof(CPUSPARCState, tl));
4383 dc->npc = DYNAMIC_PC;
4384 break;
4385 case 8: // pil
4386 gen_helper_wrpil(cpu_env, cpu_tmp0);
4387 break;
4388 case 9: // cwp
4389 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4390 break;
4391 case 10: // cansave
4392 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4393 offsetof(CPUSPARCState,
4394 cansave));
4395 break;
4396 case 11: // canrestore
4397 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4398 offsetof(CPUSPARCState,
4399 canrestore));
4400 break;
4401 case 12: // cleanwin
4402 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4403 offsetof(CPUSPARCState,
4404 cleanwin));
4405 break;
4406 case 13: // otherwin
4407 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4408 offsetof(CPUSPARCState,
4409 otherwin));
4410 break;
4411 case 14: // wstate
4412 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4413 offsetof(CPUSPARCState,
4414 wstate));
4415 break;
4416 case 16: // UA2005 gl
4417 CHECK_IU_FEATURE(dc, GL);
4418 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4419 offsetof(CPUSPARCState, gl));
4420 break;
4421 case 26: // UA2005 strand status
4422 CHECK_IU_FEATURE(dc, HYPV);
4423 if (!hypervisor(dc))
4424 goto priv_insn;
4425 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4426 break;
4427 default:
4428 goto illegal_insn;
4430 #else
4431 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4432 if (dc->def->nwindows != 32) {
4433 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4434 (1 << dc->def->nwindows) - 1);
4436 #endif
4438 break;
4439 case 0x33: /* wrtbr, UA2005 wrhpr */
4441 #ifndef TARGET_SPARC64
4442 if (!supervisor(dc))
4443 goto priv_insn;
4444 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4445 #else
4446 CHECK_IU_FEATURE(dc, HYPV);
4447 if (!hypervisor(dc))
4448 goto priv_insn;
4449 cpu_tmp0 = get_temp_tl(dc);
4450 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4451 switch (rd) {
4452 case 0: // hpstate
4453 // XXX gen_op_wrhpstate();
4454 save_state(dc);
4455 gen_op_next_insn();
4456 tcg_gen_exit_tb(0);
4457 dc->is_br = 1;
4458 break;
4459 case 1: // htstate
4460 // XXX gen_op_wrhtstate();
4461 break;
4462 case 3: // hintp
4463 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4464 break;
4465 case 5: // htba
4466 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4467 break;
4468 case 31: // hstick_cmpr
4470 TCGv_ptr r_tickptr;
4472 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4473 r_tickptr = tcg_temp_new_ptr();
4474 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4475 offsetof(CPUSPARCState, hstick));
4476 gen_helper_tick_set_limit(r_tickptr,
4477 cpu_hstick_cmpr);
4478 tcg_temp_free_ptr(r_tickptr);
4480 break;
4481 case 6: // hver readonly
4482 default:
4483 goto illegal_insn;
4485 #endif
4487 break;
4488 #endif
4489 #ifdef TARGET_SPARC64
4490 case 0x2c: /* V9 movcc */
4492 int cc = GET_FIELD_SP(insn, 11, 12);
4493 int cond = GET_FIELD_SP(insn, 14, 17);
4494 DisasCompare cmp;
4495 TCGv dst;
4497 if (insn & (1 << 18)) {
4498 if (cc == 0) {
4499 gen_compare(&cmp, 0, cond, dc);
4500 } else if (cc == 2) {
4501 gen_compare(&cmp, 1, cond, dc);
4502 } else {
4503 goto illegal_insn;
4505 } else {
4506 gen_fcompare(&cmp, cc, cond);
4509 /* The get_src2 above loaded the normal 13-bit
4510 immediate field, not the 11-bit field we have
4511 in movcc. But it did handle the reg case. */
4512 if (IS_IMM) {
4513 simm = GET_FIELD_SPs(insn, 0, 10);
4514 tcg_gen_movi_tl(cpu_src2, simm);
4515 }
4517 dst = gen_load_gpr(dc, rd);
4518 tcg_gen_movcond_tl(cmp.cond, dst,
4519 cmp.c1, cmp.c2,
4520 cpu_src2, dst);
4521 free_compare(&cmp);
4522 gen_store_gpr(dc, rd, dst);
4523 break;
4524 }
4525 case 0x2d: /* V9 sdivx */
4526 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4527 gen_store_gpr(dc, rd, cpu_dst);
4528 break;
4529 case 0x2e: /* V9 popc */
4530 gen_helper_popc(cpu_dst, cpu_src2);
4531 gen_store_gpr(dc, rd, cpu_dst);
4532 break;
4533 case 0x2f: /* V9 movr */
4534 {
4535 int cond = GET_FIELD_SP(insn, 10, 12);
4536 DisasCompare cmp;
4537 TCGv dst;
4539 gen_compare_reg(&cmp, cond, cpu_src1);
4541 /* The get_src2 above loaded the normal 13-bit
4542 immediate field, not the 10-bit field we have
4543 in movr. But it did handle the reg case. */
4544 if (IS_IMM) {
4545 simm = GET_FIELD_SPs(insn, 0, 9);
4546 tcg_gen_movi_tl(cpu_src2, simm);
4547 }
4549 dst = gen_load_gpr(dc, rd);
4550 tcg_gen_movcond_tl(cmp.cond, dst,
4551 cmp.c1, cmp.c2,
4552 cpu_src2, dst);
4553 free_compare(&cmp);
4554 gen_store_gpr(dc, rd, dst);
4555 break;
4556 }
4557 #endif
4558 default:
4559 goto illegal_insn;
4560 }
4561 }
4562 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4563 #ifdef TARGET_SPARC64
4564 int opf = GET_FIELD_SP(insn, 5, 13);
4565 rs1 = GET_FIELD(insn, 13, 17);
4566 rs2 = GET_FIELD(insn, 27, 31);
4567 if (gen_trap_ifnofpu(dc)) {
4568 goto jmp_insn;
4569 }
4571 switch (opf) {
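/* For the edge ops that follow, the three trailing gen_edge arguments
   are the element width in bits (8/16/32), whether the condition codes
   are set (the "cc" forms) and whether the little-endian variant is
   selected (the "l" forms). */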
4572 case 0x000: /* VIS I edge8cc */
4573 CHECK_FPU_FEATURE(dc, VIS1);
4574 cpu_src1 = gen_load_gpr(dc, rs1);
4575 cpu_src2 = gen_load_gpr(dc, rs2);
4576 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4577 gen_store_gpr(dc, rd, cpu_dst);
4578 break;
4579 case 0x001: /* VIS II edge8n */
4580 CHECK_FPU_FEATURE(dc, VIS2);
4581 cpu_src1 = gen_load_gpr(dc, rs1);
4582 cpu_src2 = gen_load_gpr(dc, rs2);
4583 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4584 gen_store_gpr(dc, rd, cpu_dst);
4585 break;
4586 case 0x002: /* VIS I edge8lcc */
4587 CHECK_FPU_FEATURE(dc, VIS1);
4588 cpu_src1 = gen_load_gpr(dc, rs1);
4589 cpu_src2 = gen_load_gpr(dc, rs2);
4590 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4591 gen_store_gpr(dc, rd, cpu_dst);
4592 break;
4593 case 0x003: /* VIS II edge8ln */
4594 CHECK_FPU_FEATURE(dc, VIS2);
4595 cpu_src1 = gen_load_gpr(dc, rs1);
4596 cpu_src2 = gen_load_gpr(dc, rs2);
4597 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4598 gen_store_gpr(dc, rd, cpu_dst);
4599 break;
4600 case 0x004: /* VIS I edge16cc */
4601 CHECK_FPU_FEATURE(dc, VIS1);
4602 cpu_src1 = gen_load_gpr(dc, rs1);
4603 cpu_src2 = gen_load_gpr(dc, rs2);
4604 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4605 gen_store_gpr(dc, rd, cpu_dst);
4606 break;
4607 case 0x005: /* VIS II edge16n */
4608 CHECK_FPU_FEATURE(dc, VIS2);
4609 cpu_src1 = gen_load_gpr(dc, rs1);
4610 cpu_src2 = gen_load_gpr(dc, rs2);
4611 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4612 gen_store_gpr(dc, rd, cpu_dst);
4613 break;
4614 case 0x006: /* VIS I edge16lcc */
4615 CHECK_FPU_FEATURE(dc, VIS1);
4616 cpu_src1 = gen_load_gpr(dc, rs1);
4617 cpu_src2 = gen_load_gpr(dc, rs2);
4618 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4619 gen_store_gpr(dc, rd, cpu_dst);
4620 break;
4621 case 0x007: /* VIS II edge16ln */
4622 CHECK_FPU_FEATURE(dc, VIS2);
4623 cpu_src1 = gen_load_gpr(dc, rs1);
4624 cpu_src2 = gen_load_gpr(dc, rs2);
4625 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4626 gen_store_gpr(dc, rd, cpu_dst);
4627 break;
4628 case 0x008: /* VIS I edge32cc */
4629 CHECK_FPU_FEATURE(dc, VIS1);
4630 cpu_src1 = gen_load_gpr(dc, rs1);
4631 cpu_src2 = gen_load_gpr(dc, rs2);
4632 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4633 gen_store_gpr(dc, rd, cpu_dst);
4634 break;
4635 case 0x009: /* VIS II edge32n */
4636 CHECK_FPU_FEATURE(dc, VIS2);
4637 cpu_src1 = gen_load_gpr(dc, rs1);
4638 cpu_src2 = gen_load_gpr(dc, rs2);
4639 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4640 gen_store_gpr(dc, rd, cpu_dst);
4641 break;
4642 case 0x00a: /* VIS I edge32lcc */
4643 CHECK_FPU_FEATURE(dc, VIS1);
4644 cpu_src1 = gen_load_gpr(dc, rs1);
4645 cpu_src2 = gen_load_gpr(dc, rs2);
4646 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4647 gen_store_gpr(dc, rd, cpu_dst);
4648 break;
4649 case 0x00b: /* VIS II edge32ln */
4650 CHECK_FPU_FEATURE(dc, VIS2);
4651 cpu_src1 = gen_load_gpr(dc, rs1);
4652 cpu_src2 = gen_load_gpr(dc, rs2);
4653 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4654 gen_store_gpr(dc, rd, cpu_dst);
4655 break;
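/* array16 and array32 reuse the array8 helper and simply scale the
   computed address by the element size with a left shift. */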
4656 case 0x010: /* VIS I array8 */
4657 CHECK_FPU_FEATURE(dc, VIS1);
4658 cpu_src1 = gen_load_gpr(dc, rs1);
4659 cpu_src2 = gen_load_gpr(dc, rs2);
4660 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4661 gen_store_gpr(dc, rd, cpu_dst);
4662 break;
4663 case 0x012: /* VIS I array16 */
4664 CHECK_FPU_FEATURE(dc, VIS1);
4665 cpu_src1 = gen_load_gpr(dc, rs1);
4666 cpu_src2 = gen_load_gpr(dc, rs2);
4667 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4668 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4669 gen_store_gpr(dc, rd, cpu_dst);
4670 break;
4671 case 0x014: /* VIS I array32 */
4672 CHECK_FPU_FEATURE(dc, VIS1);
4673 cpu_src1 = gen_load_gpr(dc, rs1);
4674 cpu_src2 = gen_load_gpr(dc, rs2);
4675 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4676 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4677 gen_store_gpr(dc, rd, cpu_dst);
4678 break;
4679 case 0x018: /* VIS I alignaddr */
4680 CHECK_FPU_FEATURE(dc, VIS1);
4681 cpu_src1 = gen_load_gpr(dc, rs1);
4682 cpu_src2 = gen_load_gpr(dc, rs2);
4683 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4684 gen_store_gpr(dc, rd, cpu_dst);
4685 break;
4686 case 0x01a: /* VIS I alignaddrl */
4687 CHECK_FPU_FEATURE(dc, VIS1);
4688 cpu_src1 = gen_load_gpr(dc, rs1);
4689 cpu_src2 = gen_load_gpr(dc, rs2);
4690 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4691 gen_store_gpr(dc, rd, cpu_dst);
4692 break;
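/* bmask deposits rs1 + rs2 into the upper 32 bits of GSR, where
   bshuffle later picks it up as its byte-selection mask. */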
4693 case 0x019: /* VIS II bmask */
4694 CHECK_FPU_FEATURE(dc, VIS2);
4695 cpu_src1 = gen_load_gpr(dc, rs1);
4696 cpu_src2 = gen_load_gpr(dc, rs2);
4697 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4698 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4699 gen_store_gpr(dc, rd, cpu_dst);
4700 break;
4701 case 0x020: /* VIS I fcmple16 */
4702 CHECK_FPU_FEATURE(dc, VIS1);
4703 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4704 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4705 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4706 gen_store_gpr(dc, rd, cpu_dst);
4707 break;
4708 case 0x022: /* VIS I fcmpne16 */
4709 CHECK_FPU_FEATURE(dc, VIS1);
4710 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4711 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4712 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4713 gen_store_gpr(dc, rd, cpu_dst);
4714 break;
4715 case 0x024: /* VIS I fcmple32 */
4716 CHECK_FPU_FEATURE(dc, VIS1);
4717 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4718 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4719 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4720 gen_store_gpr(dc, rd, cpu_dst);
4721 break;
4722 case 0x026: /* VIS I fcmpne32 */
4723 CHECK_FPU_FEATURE(dc, VIS1);
4724 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4725 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4726 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4727 gen_store_gpr(dc, rd, cpu_dst);
4728 break;
4729 case 0x028: /* VIS I fcmpgt16 */
4730 CHECK_FPU_FEATURE(dc, VIS1);
4731 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4732 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4733 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4734 gen_store_gpr(dc, rd, cpu_dst);
4735 break;
4736 case 0x02a: /* VIS I fcmpeq16 */
4737 CHECK_FPU_FEATURE(dc, VIS1);
4738 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4739 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4740 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4741 gen_store_gpr(dc, rd, cpu_dst);
4742 break;
4743 case 0x02c: /* VIS I fcmpgt32 */
4744 CHECK_FPU_FEATURE(dc, VIS1);
4745 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4746 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4747 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4748 gen_store_gpr(dc, rd, cpu_dst);
4749 break;
4750 case 0x02e: /* VIS I fcmpeq32 */
4751 CHECK_FPU_FEATURE(dc, VIS1);
4752 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4753 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4754 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4755 gen_store_gpr(dc, rd, cpu_dst);
4756 break;
4757 case 0x031: /* VIS I fmul8x16 */
4758 CHECK_FPU_FEATURE(dc, VIS1);
4759 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4760 break;
4761 case 0x033: /* VIS I fmul8x16au */
4762 CHECK_FPU_FEATURE(dc, VIS1);
4763 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4764 break;
4765 case 0x035: /* VIS I fmul8x16al */
4766 CHECK_FPU_FEATURE(dc, VIS1);
4767 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4768 break;
4769 case 0x036: /* VIS I fmul8sux16 */
4770 CHECK_FPU_FEATURE(dc, VIS1);
4771 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4772 break;
4773 case 0x037: /* VIS I fmul8ulx16 */
4774 CHECK_FPU_FEATURE(dc, VIS1);
4775 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4776 break;
4777 case 0x038: /* VIS I fmuld8sux16 */
4778 CHECK_FPU_FEATURE(dc, VIS1);
4779 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4780 break;
4781 case 0x039: /* VIS I fmuld8ulx16 */
4782 CHECK_FPU_FEATURE(dc, VIS1);
4783 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4784 break;
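/* The pack ops depend on the GSR scale field, which is why fpack32
   goes through gen_gsr_fop_DDD while fpack16/fpackfix below take
   cpu_gsr explicitly. */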
4785 case 0x03a: /* VIS I fpack32 */
4786 CHECK_FPU_FEATURE(dc, VIS1);
4787 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4788 break;
4789 case 0x03b: /* VIS I fpack16 */
4790 CHECK_FPU_FEATURE(dc, VIS1);
4791 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4792 cpu_dst_32 = gen_dest_fpr_F(dc);
4793 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4794 gen_store_fpr_F(dc, rd, cpu_dst_32);
4795 break;
4796 case 0x03d: /* VIS I fpackfix */
4797 CHECK_FPU_FEATURE(dc, VIS1);
4798 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4799 cpu_dst_32 = gen_dest_fpr_F(dc);
4800 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4801 gen_store_fpr_F(dc, rd, cpu_dst_32);
4802 break;
4803 case 0x03e: /* VIS I pdist */
4804 CHECK_FPU_FEATURE(dc, VIS1);
4805 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4806 break;
4807 case 0x048: /* VIS I faligndata */
4808 CHECK_FPU_FEATURE(dc, VIS1);
4809 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4810 break;
4811 case 0x04b: /* VIS I fpmerge */
4812 CHECK_FPU_FEATURE(dc, VIS1);
4813 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4814 break;
4815 case 0x04c: /* VIS II bshuffle */
4816 CHECK_FPU_FEATURE(dc, VIS2);
4817 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4818 break;
4819 case 0x04d: /* VIS I fexpand */
4820 CHECK_FPU_FEATURE(dc, VIS1);
4821 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4822 break;
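/* In the arithmetic/logical ops below, an "s" suffix marks the 32-bit
   single-register form (gen_ne_fop_FFF); the unsuffixed forms work on
   64-bit registers (gen_ne_fop_DDD). */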
4823 case 0x050: /* VIS I fpadd16 */
4824 CHECK_FPU_FEATURE(dc, VIS1);
4825 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4826 break;
4827 case 0x051: /* VIS I fpadd16s */
4828 CHECK_FPU_FEATURE(dc, VIS1);
4829 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4830 break;
4831 case 0x052: /* VIS I fpadd32 */
4832 CHECK_FPU_FEATURE(dc, VIS1);
4833 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4834 break;
4835 case 0x053: /* VIS I fpadd32s */
4836 CHECK_FPU_FEATURE(dc, VIS1);
4837 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4838 break;
4839 case 0x054: /* VIS I fpsub16 */
4840 CHECK_FPU_FEATURE(dc, VIS1);
4841 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4842 break;
4843 case 0x055: /* VIS I fpsub16s */
4844 CHECK_FPU_FEATURE(dc, VIS1);
4845 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4846 break;
4847 case 0x056: /* VIS I fpsub32 */
4848 CHECK_FPU_FEATURE(dc, VIS1);
4849 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4850 break;
4851 case 0x057: /* VIS I fpsub32s */
4852 CHECK_FPU_FEATURE(dc, VIS1);
4853 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4854 break;
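/* fzero/fone take no inputs and fsrc1/fsrc2 further down are plain
   copies, so these are open-coded instead of calling helpers. */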
4855 case 0x060: /* VIS I fzero */
4856 CHECK_FPU_FEATURE(dc, VIS1);
4857 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4858 tcg_gen_movi_i64(cpu_dst_64, 0);
4859 gen_store_fpr_D(dc, rd, cpu_dst_64);
4860 break;
4861 case 0x061: /* VIS I fzeros */
4862 CHECK_FPU_FEATURE(dc, VIS1);
4863 cpu_dst_32 = gen_dest_fpr_F(dc);
4864 tcg_gen_movi_i32(cpu_dst_32, 0);
4865 gen_store_fpr_F(dc, rd, cpu_dst_32);
4866 break;
4867 case 0x062: /* VIS I fnor */
4868 CHECK_FPU_FEATURE(dc, VIS1);
4869 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4870 break;
4871 case 0x063: /* VIS I fnors */
4872 CHECK_FPU_FEATURE(dc, VIS1);
4873 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4874 break;
4875 case 0x064: /* VIS I fandnot2 */
4876 CHECK_FPU_FEATURE(dc, VIS1);
4877 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4878 break;
4879 case 0x065: /* VIS I fandnot2s */
4880 CHECK_FPU_FEATURE(dc, VIS1);
4881 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4882 break;
4883 case 0x066: /* VIS I fnot2 */
4884 CHECK_FPU_FEATURE(dc, VIS1);
4885 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4886 break;
4887 case 0x067: /* VIS I fnot2s */
4888 CHECK_FPU_FEATURE(dc, VIS1);
4889 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4890 break;
4891 case 0x068: /* VIS I fandnot1 */
4892 CHECK_FPU_FEATURE(dc, VIS1);
4893 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4894 break;
4895 case 0x069: /* VIS I fandnot1s */
4896 CHECK_FPU_FEATURE(dc, VIS1);
4897 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4898 break;
4899 case 0x06a: /* VIS I fnot1 */
4900 CHECK_FPU_FEATURE(dc, VIS1);
4901 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4902 break;
4903 case 0x06b: /* VIS I fnot1s */
4904 CHECK_FPU_FEATURE(dc, VIS1);
4905 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4906 break;
4907 case 0x06c: /* VIS I fxor */
4908 CHECK_FPU_FEATURE(dc, VIS1);
4909 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4910 break;
4911 case 0x06d: /* VIS I fxors */
4912 CHECK_FPU_FEATURE(dc, VIS1);
4913 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4914 break;
4915 case 0x06e: /* VIS I fnand */
4916 CHECK_FPU_FEATURE(dc, VIS1);
4917 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4918 break;
4919 case 0x06f: /* VIS I fnands */
4920 CHECK_FPU_FEATURE(dc, VIS1);
4921 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4922 break;
4923 case 0x070: /* VIS I fand */
4924 CHECK_FPU_FEATURE(dc, VIS1);
4925 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4926 break;
4927 case 0x071: /* VIS I fands */
4928 CHECK_FPU_FEATURE(dc, VIS1);
4929 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4930 break;
4931 case 0x072: /* VIS I fxnor */
4932 CHECK_FPU_FEATURE(dc, VIS1);
4933 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4934 break;
4935 case 0x073: /* VIS I fxnors */
4936 CHECK_FPU_FEATURE(dc, VIS1);
4937 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4938 break;
4939 case 0x074: /* VIS I fsrc1 */
4940 CHECK_FPU_FEATURE(dc, VIS1);
4941 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4942 gen_store_fpr_D(dc, rd, cpu_src1_64);
4943 break;
4944 case 0x075: /* VIS I fsrc1s */
4945 CHECK_FPU_FEATURE(dc, VIS1);
4946 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4947 gen_store_fpr_F(dc, rd, cpu_src1_32);
4948 break;
4949 case 0x076: /* VIS I fornot2 */
4950 CHECK_FPU_FEATURE(dc, VIS1);
4951 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4952 break;
4953 case 0x077: /* VIS I fornot2s */
4954 CHECK_FPU_FEATURE(dc, VIS1);
4955 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4956 break;
4957 case 0x078: /* VIS I fsrc2 */
4958 CHECK_FPU_FEATURE(dc, VIS1);
4959 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4960 gen_store_fpr_D(dc, rd, cpu_src1_64);
4961 break;
4962 case 0x079: /* VIS I fsrc2s */
4963 CHECK_FPU_FEATURE(dc, VIS1);
4964 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4965 gen_store_fpr_F(dc, rd, cpu_src1_32);
4966 break;
4967 case 0x07a: /* VIS I fornot1 */
4968 CHECK_FPU_FEATURE(dc, VIS1);
4969 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4970 break;
4971 case 0x07b: /* VIS I fornot1s */
4972 CHECK_FPU_FEATURE(dc, VIS1);
4973 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4974 break;
4975 case 0x07c: /* VIS I for */
4976 CHECK_FPU_FEATURE(dc, VIS1);
4977 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4978 break;
4979 case 0x07d: /* VIS I fors */
4980 CHECK_FPU_FEATURE(dc, VIS1);
4981 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4982 break;
4983 case 0x07e: /* VIS I fone */
4984 CHECK_FPU_FEATURE(dc, VIS1);
4985 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4986 tcg_gen_movi_i64(cpu_dst_64, -1);
4987 gen_store_fpr_D(dc, rd, cpu_dst_64);
4988 break;
4989 case 0x07f: /* VIS I fones */
4990 CHECK_FPU_FEATURE(dc, VIS1);
4991 cpu_dst_32 = gen_dest_fpr_F(dc);
4992 tcg_gen_movi_i32(cpu_dst_32, -1);
4993 gen_store_fpr_F(dc, rd, cpu_dst_32);
4994 break;
4995 case 0x080: /* VIS I shutdown */
4996 case 0x081: /* VIS II siam */
4997 // XXX
4998 goto illegal_insn;
4999 default:
5000 goto illegal_insn;
5001 }
5002 #else
5003 goto ncp_insn;
5004 #endif
5005 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5006 #ifdef TARGET_SPARC64
5007 goto illegal_insn;
5008 #else
5009 goto ncp_insn;
5010 #endif
5011 #ifdef TARGET_SPARC64
5012 } else if (xop == 0x39) { /* V9 return */
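/* Compute the target from rs1 plus either simm13 or rs2, pop the
   register window with the restore helper, then jump indirectly. */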
5013 save_state(dc);
5014 cpu_src1 = get_src1(dc, insn);
5015 cpu_tmp0 = get_temp_tl(dc);
5016 if (IS_IMM) { /* immediate */
5017 simm = GET_FIELDs(insn, 19, 31);
5018 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5019 } else { /* register */
5020 rs2 = GET_FIELD(insn, 27, 31);
5021 if (rs2) {
5022 cpu_src2 = gen_load_gpr(dc, rs2);
5023 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5024 } else {
5025 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5026 }
5028 gen_helper_restore(cpu_env);
5029 gen_mov_pc_npc(dc);
5030 gen_check_align(cpu_tmp0, 3);
5031 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5032 dc->npc = DYNAMIC_PC;
5033 goto jmp_insn;
5034 #endif
5035 } else {
5036 cpu_src1 = get_src1(dc, insn);
5037 cpu_tmp0 = get_temp_tl(dc);
5038 if (IS_IMM) { /* immediate */
5039 simm = GET_FIELDs(insn, 19, 31);
5040 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5041 } else { /* register */
5042 rs2 = GET_FIELD(insn, 27, 31);
5043 if (rs2) {
5044 cpu_src2 = gen_load_gpr(dc, rs2);
5045 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5046 } else {
5047 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5048 }
5050 switch (xop) {
5051 case 0x38: /* jmpl */
5052 {
5053 TCGv t = gen_dest_gpr(dc, rd);
5054 tcg_gen_movi_tl(t, dc->pc);
5055 gen_store_gpr(dc, rd, t);
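/* The link value just stored is the address of the jmpl itself;
   rd == 15 gives the call idiom, and "ret" is jmpl %i7 + 8, %g0. */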
5057 gen_mov_pc_npc(dc);
5058 gen_check_align(cpu_tmp0, 3);
5059 gen_address_mask(dc, cpu_tmp0);
5060 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5061 dc->npc = DYNAMIC_PC;
5062 }
5063 goto jmp_insn;
5064 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5065 case 0x39: /* rett, V9 return */
5066 {
5067 if (!supervisor(dc))
5068 goto priv_insn;
5069 gen_mov_pc_npc(dc);
5070 gen_check_align(cpu_tmp0, 3);
5071 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5072 dc->npc = DYNAMIC_PC;
5073 gen_helper_rett(cpu_env);
5074 }
5075 goto jmp_insn;
5076 #endif
5077 case 0x3b: /* flush */
5078 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5079 goto unimp_flush;
5080 /* nop */
5081 break;
5082 case 0x3c: /* save */
5083 gen_helper_save(cpu_env);
5084 gen_store_gpr(dc, rd, cpu_tmp0);
5085 break;
5086 case 0x3d: /* restore */
5087 gen_helper_restore(cpu_env);
5088 gen_store_gpr(dc, rd, cpu_tmp0);
5089 break;
5090 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5091 case 0x3e: /* V9 done/retry */
5092 {
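/* done resumes after the trapped instruction while retry re-executes
   it; both restore state via helpers, so pc and npc become dynamic. */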
5093 switch (rd) {
5094 case 0:
5095 if (!supervisor(dc))
5096 goto priv_insn;
5097 dc->npc = DYNAMIC_PC;
5098 dc->pc = DYNAMIC_PC;
5099 gen_helper_done(cpu_env);
5100 goto jmp_insn;
5101 case 1:
5102 if (!supervisor(dc))
5103 goto priv_insn;
5104 dc->npc = DYNAMIC_PC;
5105 dc->pc = DYNAMIC_PC;
5106 gen_helper_retry(cpu_env);
5107 goto jmp_insn;
5108 default:
5109 goto illegal_insn;
5110 }
5111 }
5112 break;
5113 #endif
5114 default:
5115 goto illegal_insn;
5116 }
5117 }
5118 break;
5119 }
5120 break;
5121 case 3: /* load/store instructions */
5122 {
5123 unsigned int xop = GET_FIELD(insn, 7, 12);
5124 /* ??? gen_address_mask prevents us from using a source
5125 register directly. Always generate a temporary. */
5126 TCGv cpu_addr = get_temp_tl(dc);
5128 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5129 if (xop == 0x3c || xop == 0x3e) {
5130 /* V9 casa/casxa : no offset */
5131 } else if (IS_IMM) { /* immediate */
5132 simm = GET_FIELDs(insn, 19, 31);
5133 if (simm != 0) {
5134 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5135 }
5136 } else { /* register */
5137 rs2 = GET_FIELD(insn, 27, 31);
5138 if (rs2 != 0) {
5139 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5140 }
5141 }
5142 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5143 (xop > 0x17 && xop <= 0x1d ) ||
5144 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5145 TCGv cpu_val = gen_dest_gpr(dc, rd);
5147 switch (xop) {
5148 case 0x0: /* ld, V9 lduw, load unsigned word */
5149 gen_address_mask(dc, cpu_addr);
5150 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
5151 break;
5152 case 0x1: /* ldub, load unsigned byte */
5153 gen_address_mask(dc, cpu_addr);
5154 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
5155 break;
5156 case 0x2: /* lduh, load unsigned halfword */
5157 gen_address_mask(dc, cpu_addr);
5158 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
5159 break;
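/* ldd requires an even rd; the doubleword's high word ends up in rd
   (via the common store after this switch) and the low word in
   rd + 1. */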
5160 case 0x3: /* ldd, load double word */
5161 if (rd & 1)
5162 goto illegal_insn;
5163 else {
5164 TCGv_i64 t64;
5166 gen_address_mask(dc, cpu_addr);
5167 t64 = tcg_temp_new_i64();
5168 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
5169 tcg_gen_trunc_i64_tl(cpu_val, t64);
5170 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5171 gen_store_gpr(dc, rd + 1, cpu_val);
5172 tcg_gen_shri_i64(t64, t64, 32);
5173 tcg_gen_trunc_i64_tl(cpu_val, t64);
5174 tcg_temp_free_i64(t64);
5175 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5176 }
5177 break;
5178 case 0x9: /* ldsb, load signed byte */
5179 gen_address_mask(dc, cpu_addr);
5180 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
5181 break;
5182 case 0xa: /* ldsh, load signed halfword */
5183 gen_address_mask(dc, cpu_addr);
5184 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
5185 break;
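/* ldstub loads the byte and then writes 0xff back to the same address
   (the classic lock-acquire primitive); as the XXX notes, the two
   accesses are not actually atomic here. */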
5186 case 0xd: /* ldstub -- XXX: should be atomic */
5187 {
5188 TCGv r_const;
5189 TCGv tmp = tcg_temp_new();
5191 gen_address_mask(dc, cpu_addr);
5192 tcg_gen_qemu_ld8u(tmp, cpu_addr, dc->mem_idx);
5193 r_const = tcg_const_tl(0xff);
5194 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
5195 tcg_gen_mov_tl(cpu_val, tmp);
5196 tcg_temp_free(r_const);
5197 tcg_temp_free(tmp);
5198 }
5199 break;
5200 case 0x0f:
5201 /* swap, swap register with memory. Also atomic. */
5202 {
5203 TCGv t0 = get_temp_tl(dc);
5204 CHECK_IU_FEATURE(dc, SWAP);
5205 cpu_src1 = gen_load_gpr(dc, rd);
5206 gen_address_mask(dc, cpu_addr);
5207 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
5208 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
5209 tcg_gen_mov_tl(cpu_val, t0);
5210 }
5211 break;
5212 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5213 case 0x10: /* lda, V9 lduwa, load word alternate */
5214 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5215 break;
5216 case 0x11: /* lduba, load unsigned byte alternate */
5217 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5218 break;
5219 case 0x12: /* lduha, load unsigned halfword alternate */
5220 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5221 break;
5222 case 0x13: /* ldda, load double word alternate */
5223 if (rd & 1) {
5224 goto illegal_insn;
5225 }
5226 gen_ldda_asi(dc, cpu_addr, insn, rd);
5227 goto skip_move;
5228 case 0x19: /* ldsba, load signed byte alternate */
5229 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5230 break;
5231 case 0x1a: /* ldsha, load signed halfword alternate */
5232 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5233 break;
5234 case 0x1d: /* ldstuba -- XXX: should be atomically */
5235 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5236 break;
5237 case 0x1f: /* swapa, swap reg with alt. memory. Also
5238 atomic. */
5239 CHECK_IU_FEATURE(dc, SWAP);
5240 cpu_src1 = gen_load_gpr(dc, rd);
5241 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5242 break;
5244 #ifndef TARGET_SPARC64
5245 case 0x30: /* ldc */
5246 case 0x31: /* ldcsr */
5247 case 0x33: /* lddc */
5248 goto ncp_insn;
5249 #endif
5250 #endif
5251 #ifdef TARGET_SPARC64
5252 case 0x08: /* V9 ldsw */
5253 gen_address_mask(dc, cpu_addr);
5254 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
5255 break;
5256 case 0x0b: /* V9 ldx */
5257 gen_address_mask(dc, cpu_addr);
5258 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
5259 break;
5260 case 0x18: /* V9 ldswa */
5261 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5262 break;
5263 case 0x1b: /* V9 ldxa */
5264 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5265 break;
5266 case 0x2d: /* V9 prefetch, no effect */
5267 goto skip_move;
5268 case 0x30: /* V9 ldfa */
5269 if (gen_trap_ifnofpu(dc)) {
5270 goto jmp_insn;
5271 }
5272 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5273 gen_update_fprs_dirty(dc, rd);
5274 goto skip_move;
5275 case 0x33: /* V9 lddfa */
5276 if (gen_trap_ifnofpu(dc)) {
5277 goto jmp_insn;
5278 }
5279 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5280 gen_update_fprs_dirty(dc, DFPREG(rd));
5281 goto skip_move;
5282 case 0x3d: /* V9 prefetcha, no effect */
5283 goto skip_move;
5284 case 0x32: /* V9 ldqfa */
5285 CHECK_FPU_FEATURE(dc, FLOAT128);
5286 if (gen_trap_ifnofpu(dc)) {
5287 goto jmp_insn;
5288 }
5289 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5290 gen_update_fprs_dirty(dc, QFPREG(rd));
5291 goto skip_move;
5292 #endif
5293 default:
5294 goto illegal_insn;
5295 }
5296 gen_store_gpr(dc, rd, cpu_val);
5297 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5298 skip_move: ;
5299 #endif
5300 } else if (xop >= 0x20 && xop < 0x24) {
5301 TCGv t0;
5303 if (gen_trap_ifnofpu(dc)) {
5304 goto jmp_insn;
5305 }
5306 switch (xop) {
5307 case 0x20: /* ldf, load fpreg */
5308 gen_address_mask(dc, cpu_addr);
5309 t0 = get_temp_tl(dc);
5310 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
5311 cpu_dst_32 = gen_dest_fpr_F(dc);
5312 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
5313 gen_store_fpr_F(dc, rd, cpu_dst_32);
5314 break;
5315 case 0x21: /* ldfsr, V9 ldxfsr */
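/* On TARGET_SPARC64, rd == 1 selects the 64-bit ldxfsr; every other
   rd falls through to the 32-bit ldfsr path below. */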
5316 #ifdef TARGET_SPARC64
5317 gen_address_mask(dc, cpu_addr);
5318 if (rd == 1) {
5319 TCGv_i64 t64 = tcg_temp_new_i64();
5320 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
5321 gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5322 tcg_temp_free_i64(t64);
5323 break;
5324 }
5325 #endif
5326 cpu_dst_32 = get_temp_i32(dc);
5327 t0 = get_temp_tl(dc);
5328 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
5329 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
5330 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5331 break;
5332 case 0x22: /* ldqf, load quad fpreg */
5333 {
5334 TCGv_i32 r_const;
5336 CHECK_FPU_FEATURE(dc, FLOAT128);
5337 r_const = tcg_const_i32(dc->mem_idx);
5338 gen_address_mask(dc, cpu_addr);
5339 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
5340 tcg_temp_free_i32(r_const);
5341 gen_op_store_QT0_fpr(QFPREG(rd));
5342 gen_update_fprs_dirty(dc, QFPREG(rd));
5343 }
5344 break;
5345 case 0x23: /* lddf, load double fpreg */
5346 gen_address_mask(dc, cpu_addr);
5347 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5348 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
5349 gen_store_fpr_D(dc, rd, cpu_dst_64);
5350 break;
5351 default:
5352 goto illegal_insn;
5353 }
5354 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5355 xop == 0xe || xop == 0x1e) {
5356 TCGv cpu_val = gen_load_gpr(dc, rd);
5358 switch (xop) {
5359 case 0x4: /* st, store word */
5360 gen_address_mask(dc, cpu_addr);
5361 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
5362 break;
5363 case 0x5: /* stb, store byte */
5364 gen_address_mask(dc, cpu_addr);
5365 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
5366 break;
5367 case 0x6: /* sth, store halfword */
5368 gen_address_mask(dc, cpu_addr);
5369 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
5370 break;
5371 case 0x7: /* std, store double word */
5372 if (rd & 1)
5373 goto illegal_insn;
5374 else {
5375 TCGv_i64 t64;
5376 TCGv lo;
5378 gen_address_mask(dc, cpu_addr);
5379 lo = gen_load_gpr(dc, rd + 1);
5380 t64 = tcg_temp_new_i64();
5381 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5382 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
5383 tcg_temp_free_i64(t64);
5384 }
5385 break;
5386 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5387 case 0x14: /* sta, V9 stwa, store word alternate */
5388 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5389 break;
5390 case 0x15: /* stba, store byte alternate */
5391 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5392 break;
5393 case 0x16: /* stha, store halfword alternate */
5394 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5395 break;
5396 case 0x17: /* stda, store double word alternate */
5397 if (rd & 1) {
5398 goto illegal_insn;
5399 }
5400 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5401 break;
5402 #endif
5403 #ifdef TARGET_SPARC64
5404 case 0x0e: /* V9 stx */
5405 gen_address_mask(dc, cpu_addr);
5406 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5407 break;
5408 case 0x1e: /* V9 stxa */
5409 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5410 break;
5411 #endif
5412 default:
5413 goto illegal_insn;
5414 }
5415 } else if (xop > 0x23 && xop < 0x28) {
5416 if (gen_trap_ifnofpu(dc)) {
5417 goto jmp_insn;
5418 }
5419 switch (xop) {
5420 case 0x24: /* stf, store fpreg */
5421 {
5422 TCGv t = get_temp_tl(dc);
5423 gen_address_mask(dc, cpu_addr);
5424 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5425 tcg_gen_ext_i32_tl(t, cpu_src1_32);
5426 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5427 }
5428 break;
5429 case 0x25: /* stfsr, V9 stxfsr */
5430 {
5431 #ifdef TARGET_SPARC64
5432 gen_address_mask(dc, cpu_addr);
5433 if (rd == 1) {
5434 tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
5435 break;
5436 }
5437 #endif
5438 tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
5439 }
5440 break;
5441 case 0x26:
5442 #ifdef TARGET_SPARC64
5443 /* V9 stqf, store quad fpreg */
5444 {
5445 TCGv_i32 r_const;
5447 CHECK_FPU_FEATURE(dc, FLOAT128);
5448 gen_op_load_fpr_QT0(QFPREG(rd));
5449 r_const = tcg_const_i32(dc->mem_idx);
5450 gen_address_mask(dc, cpu_addr);
5451 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5452 tcg_temp_free_i32(r_const);
5453 }
5454 break;
5455 #else /* !TARGET_SPARC64 */
5456 /* stdfq, store floating point queue */
5457 #if defined(CONFIG_USER_ONLY)
5458 goto illegal_insn;
5459 #else
5460 if (!supervisor(dc))
5461 goto priv_insn;
5462 if (gen_trap_ifnofpu(dc)) {
5463 goto jmp_insn;
5464 }
5465 goto nfq_insn;
5466 #endif
5467 #endif
5468 case 0x27: /* stdf, store double fpreg */
5469 gen_address_mask(dc, cpu_addr);
5470 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5471 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5472 break;
5473 default:
5474 goto illegal_insn;
5475 }
5476 } else if (xop > 0x33 && xop < 0x3f) {
5477 switch (xop) {
5478 #ifdef TARGET_SPARC64
5479 case 0x34: /* V9 stfa */
5480 if (gen_trap_ifnofpu(dc)) {
5481 goto jmp_insn;
5482 }
5483 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5484 break;
5485 case 0x36: /* V9 stqfa */
5486 {
5487 CHECK_FPU_FEATURE(dc, FLOAT128);
5488 if (gen_trap_ifnofpu(dc)) {
5489 goto jmp_insn;
5490 }
5491 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5492 }
5493 break;
5494 case 0x37: /* V9 stdfa */
5495 if (gen_trap_ifnofpu(dc)) {
5496 goto jmp_insn;
5497 }
5498 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5499 break;
5500 case 0x3e: /* V9 casxa */
5501 rs2 = GET_FIELD(insn, 27, 31);
5502 cpu_src2 = gen_load_gpr(dc, rs2);
5503 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5504 break;
5505 #else
5506 case 0x34: /* stc */
5507 case 0x35: /* stcsr */
5508 case 0x36: /* stdcq */
5509 case 0x37: /* stdc */
5510 goto ncp_insn;
5511 #endif
5512 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5513 case 0x3c: /* V9 or LEON3 casa */
5514 #ifndef TARGET_SPARC64
5515 CHECK_IU_FEATURE(dc, CASA);
5516 #endif
5517 rs2 = GET_FIELD(insn, 27, 31);
5518 cpu_src2 = gen_load_gpr(dc, rs2);
5519 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5520 break;
5521 #endif
5522 default:
5523 goto illegal_insn;
5524 }
5525 } else {
5526 goto illegal_insn;
5527 }
5528 }
5529 break;
5530 }
5531 /* default case for non jump instructions */
5532 if (dc->npc == DYNAMIC_PC) {
5533 dc->pc = DYNAMIC_PC;
5534 gen_op_next_insn();
5535 } else if (dc->npc == JUMP_PC) {
5536 /* we can do a static jump */
5537 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5538 dc->is_br = 1;
5539 } else {
5540 dc->pc = dc->npc;
5541 dc->npc = dc->npc + 4;
5542 }
5543 jmp_insn:
5544 goto egress;
5545 illegal_insn:
5546 gen_exception(dc, TT_ILL_INSN);
5547 goto egress;
5548 unimp_flush:
5549 gen_exception(dc, TT_UNIMP_FLUSH);
5550 goto egress;
5551 #if !defined(CONFIG_USER_ONLY)
5552 priv_insn:
5553 gen_exception(dc, TT_PRIV_INSN);
5554 goto egress;
5555 #endif
5556 nfpu_insn:
5557 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5558 goto egress;
5559 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5560 nfq_insn:
5561 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5562 goto egress;
5563 #endif
5564 #ifndef TARGET_SPARC64
5565 ncp_insn:
5566 gen_exception(dc, TT_NCP_INSN);
5567 goto egress;
5568 #endif
5569 egress:
5570 if (dc->n_t32 != 0) {
5571 int i;
5572 for (i = dc->n_t32 - 1; i >= 0; --i) {
5573 tcg_temp_free_i32(dc->t32[i]);
5574 }
5575 dc->n_t32 = 0;
5576 }
5577 if (dc->n_ttl != 0) {
5578 int i;
5579 for (i = dc->n_ttl - 1; i >= 0; --i) {
5580 tcg_temp_free(dc->ttl[i]);
5581 }
5582 dc->n_ttl = 0;
5583 }
5584 }
5586 void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
5587 {
5588 SPARCCPU *cpu = sparc_env_get_cpu(env);
5589 CPUState *cs = CPU(cpu);
5590 target_ulong pc_start, last_pc;
5591 DisasContext dc1, *dc = &dc1;
5592 int num_insns;
5593 int max_insns;
5594 unsigned int insn;
5596 memset(dc, 0, sizeof(DisasContext));
5597 dc->tb = tb;
5598 pc_start = tb->pc;
5599 dc->pc = pc_start;
5600 last_pc = dc->pc;
5601 dc->npc = (target_ulong) tb->cs_base;
5602 dc->cc_op = CC_OP_DYNAMIC;
5603 dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
5604 dc->def = env->def;
5605 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5606 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5607 dc->singlestep = (cs->singlestep_enabled || singlestep);
5608 #ifdef TARGET_SPARC64
5609 dc->fprs_dirty = 0;
5610 dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5611 #endif
5613 num_insns = 0;
5614 max_insns = tb->cflags & CF_COUNT_MASK;
5615 if (max_insns == 0) {
5616 max_insns = CF_COUNT_MASK;
5617 }
5618 if (max_insns > TCG_MAX_INSNS) {
5619 max_insns = TCG_MAX_INSNS;
5620 }
5622 gen_tb_start(tb);
5623 do {
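/* For a conditional delayed branch, the taken target is recorded in
   the insn_start arguments with the JUMP_PC bit set, so that
   restore_state_to_opc can resolve the real npc from env->cond. */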
5624 if (dc->npc & JUMP_PC) {
5625 assert(dc->jump_pc[1] == dc->pc + 4);
5626 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5627 } else {
5628 tcg_gen_insn_start(dc->pc, dc->npc);
5629 }
5630 num_insns++;
5631 last_pc = dc->pc;
5633 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5634 if (dc->pc != pc_start) {
5635 save_state(dc);
5636 }
5637 gen_helper_debug(cpu_env);
5638 tcg_gen_exit_tb(0);
5639 dc->is_br = 1;
5640 goto exit_gen_loop;
5641 }
5643 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5644 gen_io_start();
5645 }
5647 insn = cpu_ldl_code(env, dc->pc);
5649 disas_sparc_insn(dc, insn);
5651 if (dc->is_br)
5652 break;
5653 /* if the next PC is different, we abort now */
5654 if (dc->pc != (last_pc + 4))
5655 break;
5656 /* if we reach a page boundary, we stop generation so that the
5657 PC of a TT_TFAULT exception is always in the right page */
5658 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5659 break;
5660 /* in single-step mode, we generate only one instruction and
5661 then generate an exception */
5662 if (dc->singlestep) {
5663 break;
5664 }
5665 } while (!tcg_op_buf_full() &&
5666 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5667 num_insns < max_insns);
5669 exit_gen_loop:
5670 if (tb->cflags & CF_LAST_IO) {
5671 gen_io_end();
5672 }
5673 if (!dc->is_br) {
5674 if (dc->pc != DYNAMIC_PC &&
5675 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5676 /* static PC and NPC: we can use direct chaining */
5677 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5678 } else {
5679 if (dc->pc != DYNAMIC_PC) {
5680 tcg_gen_movi_tl(cpu_pc, dc->pc);
5681 }
5682 save_npc(dc);
5683 tcg_gen_exit_tb(0);
5684 }
5685 }
5686 gen_tb_end(tb, num_insns);
5688 tb->size = last_pc + 4 - pc_start;
5689 tb->icount = num_insns;
5691 #ifdef DEBUG_DISAS
5692 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5693 && qemu_log_in_addr_range(pc_start)) {
5694 qemu_log("--------------\n");
5695 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5696 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5697 qemu_log("\n");
5698 }
5699 #endif
5700 }
5702 void gen_intermediate_code_init(CPUSPARCState *env)
5703 {
5704 static int inited;
5705 static const char gregnames[32][4] = {
5706 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5707 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5708 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5709 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5711 static const char fregnames[32][4] = {
5712 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5713 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5714 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5715 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5718 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5719 #ifdef TARGET_SPARC64
5720 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5721 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5722 #else
5723 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5724 #endif
5725 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5726 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5727 };
5729 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5730 #ifdef TARGET_SPARC64
5731 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5732 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5733 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5734 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5735 "hstick_cmpr" },
5736 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5737 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5738 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5739 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5740 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5741 #endif
5742 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5743 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5744 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5745 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5746 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5747 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5748 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5749 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5750 #ifndef CONFIG_USER_ONLY
5751 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5752 #endif
5753 };
5755 unsigned int i;
5757 /* init various static tables */
5758 if (inited) {
5759 return;
5760 }
5761 inited = 1;
5763 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5764 tcg_ctx.tcg_env = cpu_env;
5766 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5767 offsetof(CPUSPARCState, regwptr),
5768 "regwptr");
5770 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5771 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5772 }
5774 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5775 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5776 }
5778 TCGV_UNUSED(cpu_regs[0]);
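/* %g0 always reads as zero, so no TCG global backs it; %g1-%g7 live
   directly in env->gregs, while the windowed registers are reached
   indirectly through regwptr. */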
5779 for (i = 1; i < 8; ++i) {
5780 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5781 offsetof(CPUSPARCState, gregs[i]),
5782 gregnames[i]);
5783 }
5785 for (i = 8; i < 32; ++i) {
5786 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5787 (i - 8) * sizeof(target_ulong),
5788 gregnames[i]);
5789 }
5791 for (i = 0; i < TARGET_DPREGS; i++) {
5792 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5793 offsetof(CPUSPARCState, fpr[i]),
5794 fregnames[i]);
5795 }
5796 }
5798 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5799 target_ulong *data)
5800 {
5801 target_ulong pc = data[0];
5802 target_ulong npc = data[1];
5804 env->pc = pc;
5805 if (npc == DYNAMIC_PC) {
5806 /* dynamic NPC: already stored */
5807 } else if (npc & JUMP_PC) {
5808 /* jump PC: use 'cond' and the jump targets of the translation */
5809 if (env->cond) {
5810 env->npc = npc & ~3;
5811 } else {
5812 env->npc = pc + 4;
5813 }
5814 } else {
5815 env->npc = npc;
5816 }
5817 }