/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#include "asi.h"
#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
    bool singlestep;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
} DisasContext;
typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
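
/* For example, GET_FIELD(insn, 3, 6) counts bit positions from the MSB,
   so it extracts conventional bits 25..28: (insn >> 25) & 0xf.  The same
   four bits read via the manual's numbering are GET_FIELD_SP(insn, 25, 28),
   which expands to exactly that GET_FIELD invocation.  */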
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
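
/* On SPARC64 the low bit of an odd double/quad register number encodes
   the upper half of the register file, e.g. DFPREG(1) yields 32 (%d32).
   Pre-V9 targets simply mask off the low bit(s).  */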
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1<<13))
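
/* Bit 13 of a SPARC format-3 instruction selects between a register
   operand (i = 0) and a 13-bit signed immediate (i = 1).  IS_IMM assumes
   a local variable named "insn" holding the instruction word.  */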
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    TCGv_i32 ret = get_temp_i32(dc);
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
#endif
}
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = (TCGv_i64)v;
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}
#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif
/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}
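
/* GPR accessors.  %g0 is hardwired to zero: reading register 0 yields a
   fresh zero temporary and stores to it are simply dropped, so callers
   never need to special-case it.  */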
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}
static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
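
/* Direct TB chaining via goto_tb is only safe when both the target pc
   and npc stay on the same guest page as this TB (and we are not
   single-stepping); otherwise gen_goto_tb falls back to storing pc/npc
   and exiting to the main loop.  */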
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
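
/* MULScc implements one step of the V8 multiply-step algorithm: the low
   bit of %y decides whether src2 is added at all, %y is shifted right by
   one with the low bit of src1 shifted into its top, and N ^ V from the
   previous step is shifted into the top of src1 before the add.  */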
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
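
/* Integer condition evaluation: each gen_op_eval_* below computes one of
   the sixteen V8 branch conditions as a 0/1 value from the given PSR
   image.  The names follow the branch mnemonics (ba, be, ble, ...), and
   the comment above each gives the flag expression it implements.  */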
// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
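
/* Branch generation.  SPARC branches have a delay slot: a taken branch
   updates npc while pc still points at the delay-slot instruction, and
   an annulled (",a") untaken branch skips the slot entirely.  When the
   outcome is not known at translation time, npc is set to JUMP_PC and
   the two candidate targets are kept in dc->jump_pc[].  */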
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->is_br = 1;
}

static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);
    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
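
/* Lazy condition codes: dc->cc_op remembers which kind of instruction
   last set the flags.  For CC_OP_SUB and CC_OP_LOGIC the saved operands
   allow a direct TCG comparison (see the tables below); anything else
   falls back to materializing the PSR with gen_helper_compute_psr() and
   testing individual flag bits.  */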
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
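
/* FP compare helpers.  V9 provides four condition-code sets %fcc0-%fcc3,
   so each comparison comes in four variants selected by fccno; the
   pre-V9 build further below only has %fcc0 and ignores fccno.  The "e"
   variants are the signaling compares, which additionally raise an IEEE
   exception on unordered operands.  */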
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
#endif
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
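
/* FP operation templates.  The suffix letters give the operand sizes:
   F = 32-bit single, D = 64-bit double, Q = 128-bit quad (kept in the
   qt0/qt1 env slots).  The plain gen_fop_* variants check for IEEE
   exceptions after calling the helper; the gen_ne_fop_* variants skip
   that check for operations that cannot raise one.  */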
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, TCGMemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
}

static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_const_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
    tcg_temp_free(m1);
}
/* asi moves */
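/* Every alternate-space access carries an 8-bit address space identifier
   (ASI).  get_asi() classifies the ASI of an instruction into one of the
   GET_ASI_* kinds below, so the common cases can be emitted as inline
   loads/stores with the right mem_idx and endianness, and only the odd
   ones fall back to the ld_asi/st_asi helpers.  */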
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    TCGMemOp memop;
} DisasASI;
static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }

    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }
}
static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for stda.  */
#ifndef TARGET_SPARC64
        gen_exception(dc, TT_ILL_INSN);
        break;
#else
        if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            return;
        }
        /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
         * are ST_BLKINIT_ ASIs */
        /* fall through */
#endif
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
        break;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    case GET_ASI_BCOPY:
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_const_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }

            tcg_temp_free(saddr);
            tcg_temp_free(daddr);
            tcg_temp_free(four);
            tcg_temp_free_i32(tmp);
        }
        break;
#endif
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);

            /* A write to a TLB register may alter page maps.  End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
2390 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
2391 TCGv addr, int insn)
2393 DisasASI da = get_asi(dc, insn, MO_TEUL);
2395 switch (da.type) {
2396 case GET_ASI_EXCP:
2397 break;
2398 case GET_ASI_DIRECT:
2399 gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
2400 break;
2401 default:
2402 /* ??? Should be DAE_invalid_asi. */
2403 gen_exception(dc, TT_DATA_ACCESS);
2404 break;
2408 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2409 int insn, int rd)
2411 DisasASI da = get_asi(dc, insn, MO_TEUL);
2412 TCGv oldv;
2414 switch (da.type) {
2415 case GET_ASI_EXCP:
2416 return;
2417 case GET_ASI_DIRECT:
2418 oldv = tcg_temp_new();
2419 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2420 da.mem_idx, da.memop);
2421 gen_store_gpr(dc, rd, oldv);
2422 tcg_temp_free(oldv);
2423 break;
2424 default:
2425 /* ??? Should be DAE_invalid_asi. */
2426 gen_exception(dc, TT_DATA_ACCESS);
2427 break;
2431 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
2433 DisasASI da = get_asi(dc, insn, MO_UB);
2435 switch (da.type) {
2436 case GET_ASI_EXCP:
2437 break;
2438 case GET_ASI_DIRECT:
2439 gen_ldstub(dc, dst, addr, da.mem_idx);
2440 break;
2441 default:
2442 /* ??? In theory, this should raise DAE_invalid_asi.
2443 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2444 if (tb_cflags(dc->tb) & CF_PARALLEL) {
2445 gen_helper_exit_atomic(cpu_env);
2446 } else {
2447 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2448 TCGv_i32 r_mop = tcg_const_i32(MO_UB);
2449 TCGv_i64 s64, t64;
2451 save_state(dc);
2452 t64 = tcg_temp_new_i64();
2453 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2455 s64 = tcg_const_i64(0xff);
2456 gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
2457 tcg_temp_free_i64(s64);
2458 tcg_temp_free_i32(r_mop);
2459 tcg_temp_free_i32(r_asi);
2461 tcg_gen_trunc_i64_tl(dst, t64);
2462 tcg_temp_free_i64(t64);
2464 /* End the TB. */
2465 dc->npc = DYNAMIC_PC;
2467 break;
2470 #endif
2472 #ifdef TARGET_SPARC64
2473 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2474 int insn, int size, int rd)
2476 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2477 TCGv_i32 d32;
2478 TCGv_i64 d64;
2480 switch (da.type) {
2481 case GET_ASI_EXCP:
2482 break;
2484 case GET_ASI_DIRECT:
2485 gen_address_mask(dc, addr);
2486 switch (size) {
2487 case 4:
2488 d32 = gen_dest_fpr_F(dc);
2489 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
2490 gen_store_fpr_F(dc, rd, d32);
2491 break;
2492 case 8:
2493 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2494 da.memop | MO_ALIGN_4);
2495 break;
2496 case 16:
2497 d64 = tcg_temp_new_i64();
2498 tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
2499 tcg_gen_addi_tl(addr, addr, 8);
2500 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
2501 da.memop | MO_ALIGN_4);
2502 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2503 tcg_temp_free_i64(d64);
2504 break;
2505 default:
2506 g_assert_not_reached();
2508 break;
2510 case GET_ASI_BLOCK:
2511 /* Valid for lddfa on aligned registers only. */
2512 if (size == 8 && (rd & 7) == 0) {
2513 TCGMemOp memop;
2514 TCGv eight;
2515 int i;
2517 gen_address_mask(dc, addr);
2519 /* The first operation checks required alignment. */
2520 memop = da.memop | MO_ALIGN_64;
2521 eight = tcg_const_tl(8);
2522 for (i = 0; ; ++i) {
2523 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2524 da.mem_idx, memop);
2525 if (i == 7) {
2526 break;
2528 tcg_gen_add_tl(addr, addr, eight);
2529 memop = da.memop;
2531 tcg_temp_free(eight);
2532 } else {
2533 gen_exception(dc, TT_ILL_INSN);
2535 break;
2537 case GET_ASI_SHORT:
2538 /* Valid for lddfa only. */
2539 if (size == 8) {
2540 gen_address_mask(dc, addr);
2541 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2542 } else {
2543 gen_exception(dc, TT_ILL_INSN);
2545 break;
2547 default:
2549 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2550 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2552 save_state(dc);
2553 /* According to the table in the UA2011 manual, the only
2554 other asis that are valid for ldfa/lddfa/ldqfa are
2555 the NO_FAULT asis. We still need a helper for these,
2556 but we can just use the integer asi helper for them. */
2557 switch (size) {
2558 case 4:
2559 d64 = tcg_temp_new_i64();
2560 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2561 d32 = gen_dest_fpr_F(dc);
2562 tcg_gen_extrl_i64_i32(d32, d64);
2563 tcg_temp_free_i64(d64);
2564 gen_store_fpr_F(dc, rd, d32);
2565 break;
2566 case 8:
2567 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2568 break;
2569 case 16:
2570 d64 = tcg_temp_new_i64();
2571 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2572 tcg_gen_addi_tl(addr, addr, 8);
2573 gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2574 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
2575 tcg_temp_free_i64(d64);
2576 break;
2577 default:
2578 g_assert_not_reached();
2580 tcg_temp_free_i32(r_mop);
2581 tcg_temp_free_i32(r_asi);
2583 break;
2587 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2588 int insn, int size, int rd)
2590 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2591 TCGv_i32 d32;
2593 switch (da.type) {
2594 case GET_ASI_EXCP:
2595 break;
2597 case GET_ASI_DIRECT:
2598 gen_address_mask(dc, addr);
2599 switch (size) {
2600 case 4:
2601 d32 = gen_load_fpr_F(dc, rd);
2602 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
2603 break;
2604 case 8:
2605 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2606 da.memop | MO_ALIGN_4);
2607 break;
2608 case 16:
2609 /* Only 4-byte alignment required. However, it is legal for the
2610 cpu to signal the alignment fault, and the OS trap handler is
2611 required to fix it up. Requiring 16-byte alignment here avoids
2612 having to probe the second page before performing the first
2613 write. */
2614 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
2615 da.memop | MO_ALIGN_16);
2616 tcg_gen_addi_tl(addr, addr, 8);
2617 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2618 break;
2619 default:
2620 g_assert_not_reached();
2622 break;
2624 case GET_ASI_BLOCK:
2625 /* Valid for stdfa on aligned registers only. */
2626 if (size == 8 && (rd & 7) == 0) {
2627 TCGMemOp memop;
2628 TCGv eight;
2629 int i;
2631 gen_address_mask(dc, addr);
2633 /* The first operation checks required alignment. */
2634 memop = da.memop | MO_ALIGN_64;
2635 eight = tcg_const_tl(8);
2636 for (i = 0; ; ++i) {
2637 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2638 da.mem_idx, memop);
2639 if (i == 7) {
2640 break;
2642 tcg_gen_add_tl(addr, addr, eight);
2643 memop = da.memop;
2645 tcg_temp_free(eight);
2646 } else {
2647 gen_exception(dc, TT_ILL_INSN);
2649 break;
2651 case GET_ASI_SHORT:
2652 /* Valid for stdfa only. */
2653 if (size == 8) {
2654 gen_address_mask(dc, addr);
2655 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2656 } else {
2657 gen_exception(dc, TT_ILL_INSN);
2659 break;
2661 default:
2662 /* According to the table in the UA2011 manual, the only
2663 other asis that are valid for stfa/stdfa/stqfa are
2664 the PST* asis, which aren't currently handled. */
2665 gen_exception(dc, TT_ILL_INSN);
2666 break;
2670 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2672 DisasASI da = get_asi(dc, insn, MO_TEQ);
2673 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2674 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2676 switch (da.type) {
2677 case GET_ASI_EXCP:
2678 return;
2680 case GET_ASI_DTWINX:
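/* Twin doubleword load: two adjacent 64-bit words into the register
   pair (rd, rd + 1).  Only the first access carries MO_ALIGN_16;
   the second is then aligned by construction. */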
2681 gen_address_mask(dc, addr);
2682 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2683 tcg_gen_addi_tl(addr, addr, 8);
2684 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2685 break;
2687 case GET_ASI_DIRECT:
2689 TCGv_i64 tmp = tcg_temp_new_i64();
2691 gen_address_mask(dc, addr);
2692 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
2694 /* Note that LE ldda acts as if each 32-bit register
2695 result is byte swapped. Having just performed one
2696 64-bit bswap, we now need to swap the writebacks. */
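/* Illustration with memory bytes b0..b7 (b0 at the lowest address):
   BE ldda yields hi = b0b1b2b3, lo = b4b5b6b7, while LE ldda must
   yield hi = b3b2b1b0, lo = b7b6b5b4.  The single LE 64-bit load
   leaves b3b2b1b0 in the low half and b7b6b5b4 in the high half,
   hence the swapped extraction below. */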
2697 if ((da.memop & MO_BSWAP) == MO_TE) {
2698 tcg_gen_extr32_i64(lo, hi, tmp);
2699 } else {
2700 tcg_gen_extr32_i64(hi, lo, tmp);
2702 tcg_temp_free_i64(tmp);
2704 break;
2706 default:
2707 /* ??? In theory we've handled all of the ASIs that are valid
2708 for ldda, and this should raise DAE_invalid_asi. However,
2709 real hardware allows others. This can be seen with e.g.
2710 FreeBSD 10.3 wrt ASI_IC_TAG. */
2712 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2713 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2714 TCGv_i64 tmp = tcg_temp_new_i64();
2716 save_state(dc);
2717 gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
2718 tcg_temp_free_i32(r_asi);
2719 tcg_temp_free_i32(r_mop);
2721 /* See above. */
2722 if ((da.memop & MO_BSWAP) == MO_TE) {
2723 tcg_gen_extr32_i64(lo, hi, tmp);
2724 } else {
2725 tcg_gen_extr32_i64(hi, lo, tmp);
2727 tcg_temp_free_i64(tmp);
2729 break;
2732 gen_store_gpr(dc, rd, hi);
2733 gen_store_gpr(dc, rd + 1, lo);
2736 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2737 int insn, int rd)
2739 DisasASI da = get_asi(dc, insn, MO_TEQ);
2740 TCGv lo = gen_load_gpr(dc, rd + 1);
2742 switch (da.type) {
2743 case GET_ASI_EXCP:
2744 break;
2746 case GET_ASI_DTWINX:
2747 gen_address_mask(dc, addr);
2748 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
2749 tcg_gen_addi_tl(addr, addr, 8);
2750 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2751 break;
2753 case GET_ASI_DIRECT:
2755 TCGv_i64 t64 = tcg_temp_new_i64();
2757 /* Note that LE stda acts as if each 32-bit register result is
2758 byte swapped. We will perform one 64-bit LE store, so now
2759 we must swap the order of the construction. */
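/* Mirror of the ldda case: for LE the low 32-bit half of the
   stored doubleword must come from rd (hi), not rd + 1 (lo). */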
2760 if ((da.memop & MO_BSWAP) == MO_TE) {
2761 tcg_gen_concat32_i64(t64, lo, hi);
2762 } else {
2763 tcg_gen_concat32_i64(t64, hi, lo);
2765 gen_address_mask(dc, addr);
2766 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2767 tcg_temp_free_i64(t64);
2769 break;
2771 default:
2772 /* ??? In theory we've handled all of the ASIs that are valid
2773 for stda, and this should raise DAE_invalid_asi. */
2775 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2776 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2777 TCGv_i64 t64 = tcg_temp_new_i64();
2779 /* See above. */
2780 if ((da.memop & MO_BSWAP) == MO_TE) {
2781 tcg_gen_concat32_i64(t64, lo, hi);
2782 } else {
2783 tcg_gen_concat32_i64(t64, hi, lo);
2786 save_state(dc);
2787 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2788 tcg_temp_free_i32(r_mop);
2789 tcg_temp_free_i32(r_asi);
2790 tcg_temp_free_i64(t64);
2792 break;
2796 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
2797 int insn, int rd)
2799 DisasASI da = get_asi(dc, insn, MO_TEQ);
2800 TCGv oldv;
2802 switch (da.type) {
2803 case GET_ASI_EXCP:
2804 return;
2805 case GET_ASI_DIRECT:
2806 oldv = tcg_temp_new();
2807 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
2808 da.mem_idx, da.memop);
2809 gen_store_gpr(dc, rd, oldv);
2810 tcg_temp_free(oldv);
2811 break;
2812 default:
2813 /* ??? Should be DAE_invalid_asi. */
2814 gen_exception(dc, TT_DATA_ACCESS);
2815 break;
2819 #elif !defined(CONFIG_USER_ONLY)
2820 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2822 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2823 whereby "rd + 1" elicits "error: array subscript is above array".
2824 Since we have already asserted that rd is even, the semantics
2825 are unchanged. */
2826 TCGv lo = gen_dest_gpr(dc, rd | 1);
2827 TCGv hi = gen_dest_gpr(dc, rd);
2828 TCGv_i64 t64 = tcg_temp_new_i64();
2829 DisasASI da = get_asi(dc, insn, MO_TEQ);
2831 switch (da.type) {
2832 case GET_ASI_EXCP:
2833 tcg_temp_free_i64(t64);
2834 return;
2835 case GET_ASI_DIRECT:
2836 gen_address_mask(dc, addr);
2837 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
2838 break;
2839 default:
2841 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2842 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2844 save_state(dc);
2845 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2846 tcg_temp_free_i32(r_mop);
2847 tcg_temp_free_i32(r_asi);
2849 break;
2852 tcg_gen_extr_i64_i32(lo, hi, t64);
2853 tcg_temp_free_i64(t64);
2854 gen_store_gpr(dc, rd | 1, lo);
2855 gen_store_gpr(dc, rd, hi);
2858 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2859 int insn, int rd)
2861 DisasASI da = get_asi(dc, insn, MO_TEQ);
2862 TCGv lo = gen_load_gpr(dc, rd + 1);
2863 TCGv_i64 t64 = tcg_temp_new_i64();
2865 tcg_gen_concat_tl_i64(t64, lo, hi);
2867 switch (da.type) {
2868 case GET_ASI_EXCP:
2869 break;
2870 case GET_ASI_DIRECT:
2871 gen_address_mask(dc, addr);
2872 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2873 break;
2874 case GET_ASI_BFILL:
2875 /* Store 32 bytes of T64 to ADDR. */
2876 /* ??? The original qemu code suggests 8-byte alignment, dropping
2877 the low bits, but the only place I can see this used is in the
2878 Linux kernel with 32-byte alignment, which would make more sense
2879 as a cacheline-style operation. */
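/* Sketch of what the loop below emits (illustrative only):
     for (i = 0; i < 32; i += 8)
         *(uint64_t *)((addr & ~7) + i) = t64;
   i.e. a 32-byte fill with the rd:rd+1 doubleword. */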
2881 TCGv d_addr = tcg_temp_new();
2882 TCGv eight = tcg_const_tl(8);
2883 int i;
2885 tcg_gen_andi_tl(d_addr, addr, -8);
2886 for (i = 0; i < 32; i += 8) {
2887 tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
2888 tcg_gen_add_tl(d_addr, d_addr, eight);
2891 tcg_temp_free(d_addr);
2892 tcg_temp_free(eight);
2894 break;
2895 default:
2897 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2898 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2900 save_state(dc);
2901 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2902 tcg_temp_free_i32(r_mop);
2903 tcg_temp_free_i32(r_asi);
2905 break;
2908 tcg_temp_free_i64(t64);
2910 #endif
2912 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2914 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2915 return gen_load_gpr(dc, rs1);
2918 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2920 if (IS_IMM) { /* immediate */
2921 target_long simm = GET_FIELDs(insn, 19, 31);
2922 TCGv t = get_temp_tl(dc);
2923 tcg_gen_movi_tl(t, simm);
2924 return t;
2925 } else { /* register */
2926 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2927 return gen_load_gpr(dc, rs2);
2931 #ifdef TARGET_SPARC64
2932 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2934 TCGv_i32 c32, zero, dst, s1, s2;
2936 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2937 or fold the comparison down to 32 bits and use movcond_i32. Choose
2938 the latter. */
2939 c32 = tcg_temp_new_i32();
2940 if (cmp->is_bool) {
2941 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2942 } else {
2943 TCGv_i64 c64 = tcg_temp_new_i64();
2944 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2945 tcg_gen_extrl_i64_i32(c32, c64);
2946 tcg_temp_free_i64(c64);
2949 s1 = gen_load_fpr_F(dc, rs);
2950 s2 = gen_load_fpr_F(dc, rd);
2951 dst = gen_dest_fpr_F(dc);
2952 zero = tcg_const_i32(0);
2954 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2956 tcg_temp_free_i32(c32);
2957 tcg_temp_free_i32(zero);
2958 gen_store_fpr_F(dc, rd, dst);
2961 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2963 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2964 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2965 gen_load_fpr_D(dc, rs),
2966 gen_load_fpr_D(dc, rd));
2967 gen_store_fpr_D(dc, rd, dst);
2970 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2972 int qd = QFPREG(rd);
2973 int qs = QFPREG(rs);
2975 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2976 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2977 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2978 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2980 gen_update_fprs_dirty(dc, qd);
2983 #ifndef CONFIG_USER_ONLY
2984 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2986 TCGv_i32 r_tl = tcg_temp_new_i32();
2988 /* load env->tl into r_tl */
2989 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2991 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2992 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2994 /* calculate offset to current trap state from env->ts, reuse r_tl */
2995 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2996 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2998 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
3000 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
3001 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
3002 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
3003 tcg_temp_free_ptr(r_tl_tmp);
3006 tcg_temp_free_i32(r_tl);
3008 #endif
3010 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
3011 int width, bool cc, bool left)
3013 TCGv lo1, lo2, t1, t2;
3014 uint64_t amask, tabl, tabr;
3015 int shift, imask, omask;
3017 if (cc) {
3018 tcg_gen_mov_tl(cpu_cc_src, s1);
3019 tcg_gen_mov_tl(cpu_cc_src2, s2);
3020 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
3021 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3022 dc->cc_op = CC_OP_SUB;
3025 /* Theory of operation: there are two tables, left and right (not to
3026 be confused with the left and right versions of the opcode). These
3027 are indexed by the low 3 bits of the inputs. To make things "easy",
3028 these tables are loaded into two constants, TABL and TABR below.
3029 The operation index = (input & imask) << shift calculates the index
3030 into the constant, while val = (table >> index) & omask calculates
3031 the value we're looking for. */
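/* Worked example (width 8, left variant): for s1 & 7 == 5 we get
   index = 5 << 3 = 40, and (0x80c0e0f0f8fcfeffULL >> 40) & 0xff
   == 0xe0, the partial-store mask contributed by the starting
   address; TABR and the s1 == s2 test below combine with it. */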
3032 switch (width) {
3033 case 8:
3034 imask = 0x7;
3035 shift = 3;
3036 omask = 0xff;
3037 if (left) {
3038 tabl = 0x80c0e0f0f8fcfeffULL;
3039 tabr = 0xff7f3f1f0f070301ULL;
3040 } else {
3041 tabl = 0x0103070f1f3f7fffULL;
3042 tabr = 0xfffefcf8f0e0c080ULL;
3044 break;
3045 case 16:
3046 imask = 0x6;
3047 shift = 1;
3048 omask = 0xf;
3049 if (left) {
3050 tabl = 0x8cef;
3051 tabr = 0xf731;
3052 } else {
3053 tabl = 0x137f;
3054 tabr = 0xfec8;
3056 break;
3057 case 32:
3058 imask = 0x4;
3059 shift = 0;
3060 omask = 0x3;
3061 if (left) {
3062 tabl = (2 << 2) | 3;
3063 tabr = (3 << 2) | 1;
3064 } else {
3065 tabl = (1 << 2) | 3;
3066 tabr = (3 << 2) | 2;
3068 break;
3069 default:
3070 abort();
3073 lo1 = tcg_temp_new();
3074 lo2 = tcg_temp_new();
3075 tcg_gen_andi_tl(lo1, s1, imask);
3076 tcg_gen_andi_tl(lo2, s2, imask);
3077 tcg_gen_shli_tl(lo1, lo1, shift);
3078 tcg_gen_shli_tl(lo2, lo2, shift);
3080 t1 = tcg_const_tl(tabl);
3081 t2 = tcg_const_tl(tabr);
3082 tcg_gen_shr_tl(lo1, t1, lo1);
3083 tcg_gen_shr_tl(lo2, t2, lo2);
3084 tcg_gen_andi_tl(dst, lo1, omask);
3085 tcg_gen_andi_tl(lo2, lo2, omask);
3087 amask = -8;
3088 if (AM_CHECK(dc)) {
3089 amask &= 0xffffffffULL;
3091 tcg_gen_andi_tl(s1, s1, amask);
3092 tcg_gen_andi_tl(s2, s2, amask);
3094 /* We want to compute
3095 dst = (s1 == s2 ? lo1 : lo1 & lo2).
3096 We've already done dst = lo1, so this reduces to
3097 dst &= (s1 == s2 ? -1 : lo2)
3098 Which we perform by
3099 lo2 |= -(s1 == s2)
3100 dst &= lo2 */
3102 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
3103 tcg_gen_neg_tl(t1, t1);
3104 tcg_gen_or_tl(lo2, lo2, t1);
3105 tcg_gen_and_tl(dst, dst, lo2);
3107 tcg_temp_free(lo1);
3108 tcg_temp_free(lo2);
3109 tcg_temp_free(t1);
3110 tcg_temp_free(t2);
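/* alignaddr: dst = (s1 + s2) & ~7, and the low 3 bits of the sum
   (negated when LEFT is set, for the "little" variant) are
   deposited into GSR.align for later use by faligndata. */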
3113 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
3115 TCGv tmp = tcg_temp_new();
3117 tcg_gen_add_tl(tmp, s1, s2);
3118 tcg_gen_andi_tl(dst, tmp, -8);
3119 if (left) {
3120 tcg_gen_neg_tl(tmp, tmp);
3122 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3124 tcg_temp_free(tmp);
3127 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
3129 TCGv t1, t2, shift;
3131 t1 = tcg_temp_new();
3132 t2 = tcg_temp_new();
3133 shift = tcg_temp_new();
3135 tcg_gen_andi_tl(shift, gsr, 7);
3136 tcg_gen_shli_tl(shift, shift, 3);
3137 tcg_gen_shl_tl(t1, s1, shift);
3139 /* A shift of 64 does not produce 0 in TCG. Divide this into a
3140 shift of (up to 63) followed by a constant shift of 1. */
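/* Since shift is a multiple of 8, (shift ^ 63) == 63 - shift, so
   t2 = s2 >> (shift ^ 63) >> 1 == s2 >> (64 - shift), and this is
   0 when shift == 0 as required.  Net effect:
   dst = (s1 << shift) | (s2 >> (64 - shift)). */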
3141 tcg_gen_xori_tl(shift, shift, 63);
3142 tcg_gen_shr_tl(t2, s2, shift);
3143 tcg_gen_shri_tl(t2, t2, 1);
3145 tcg_gen_or_tl(dst, t1, t2);
3147 tcg_temp_free(t1);
3148 tcg_temp_free(t2);
3149 tcg_temp_free(shift);
3151 #endif
3153 #define CHECK_IU_FEATURE(dc, FEATURE) \
3154 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3155 goto illegal_insn;
3156 #define CHECK_FPU_FEATURE(dc, FEATURE) \
3157 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3158 goto nfpu_insn;
3160 /* before an instruction, dc->pc must be static */
3161 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
3163 unsigned int opc, rs1, rs2, rd;
3164 TCGv cpu_src1, cpu_src2;
3165 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
3166 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
3167 target_long simm;
3169 opc = GET_FIELD(insn, 0, 1);
3170 rd = GET_FIELD(insn, 2, 6);
3172 switch (opc) {
3173 case 0: /* branches/sethi */
3175 unsigned int xop = GET_FIELD(insn, 7, 9);
3176 int32_t target;
3177 switch (xop) {
3178 #ifdef TARGET_SPARC64
3179 case 0x1: /* V9 BPcc */
3181 int cc;
3183 target = GET_FIELD_SP(insn, 0, 18);
3184 target = sign_extend(target, 19);
3185 target <<= 2;
3186 cc = GET_FIELD_SP(insn, 20, 21);
3187 if (cc == 0)
3188 do_branch(dc, target, insn, 0);
3189 else if (cc == 2)
3190 do_branch(dc, target, insn, 1);
3191 else
3192 goto illegal_insn;
3193 goto jmp_insn;
3195 case 0x3: /* V9 BPr */
3197 target = GET_FIELD_SP(insn, 0, 13) |
3198 (GET_FIELD_SP(insn, 20, 21) << 14);
3199 target = sign_extend(target, 16);
3200 target <<= 2;
3201 cpu_src1 = get_src1(dc, insn);
3202 do_branch_reg(dc, target, insn, cpu_src1);
3203 goto jmp_insn;
3205 case 0x5: /* V9 FBPcc */
3207 int cc = GET_FIELD_SP(insn, 20, 21);
3208 if (gen_trap_ifnofpu(dc)) {
3209 goto jmp_insn;
3211 target = GET_FIELD_SP(insn, 0, 18);
3212 target = sign_extend(target, 19);
3213 target <<= 2;
3214 do_fbranch(dc, target, insn, cc);
3215 goto jmp_insn;
3217 #else
3218 case 0x7: /* CBN+x */
3220 goto ncp_insn;
3222 #endif
3223 case 0x2: /* BN+x */
3225 target = GET_FIELD(insn, 10, 31);
3226 target = sign_extend(target, 22);
3227 target <<= 2;
3228 do_branch(dc, target, insn, 0);
3229 goto jmp_insn;
3231 case 0x6: /* FBN+x */
3233 if (gen_trap_ifnofpu(dc)) {
3234 goto jmp_insn;
3236 target = GET_FIELD(insn, 10, 31);
3237 target = sign_extend(target, 22);
3238 target <<= 2;
3239 do_fbranch(dc, target, insn, 0);
3240 goto jmp_insn;
3242 case 0x4: /* SETHI */
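/* sethi: rd<31:10> = imm22, rd<9:0> = 0. */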
3243 /* Special-case %g0 because that's the canonical nop. */
3244 if (rd) {
3245 uint32_t value = GET_FIELD(insn, 10, 31);
3246 TCGv t = gen_dest_gpr(dc, rd);
3247 tcg_gen_movi_tl(t, value << 10);
3248 gen_store_gpr(dc, rd, t);
3250 break;
3251 case 0x0: /* UNIMPL */
3252 default:
3253 goto illegal_insn;
3255 break;
3257 break;
3258 case 1: /*CALL*/
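/* CALL: %o7 receives the address of the call itself; the target
   is PC-relative, a sign-extended 30-bit word displacement << 2. */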
3260 target_long target = GET_FIELDs(insn, 2, 31) << 2;
3261 TCGv o7 = gen_dest_gpr(dc, 15);
3263 tcg_gen_movi_tl(o7, dc->pc);
3264 gen_store_gpr(dc, 15, o7);
3265 target += dc->pc;
3266 gen_mov_pc_npc(dc);
3267 #ifdef TARGET_SPARC64
3268 if (unlikely(AM_CHECK(dc))) {
3269 target &= 0xffffffffULL;
3271 #endif
3272 dc->npc = target;
3274 goto jmp_insn;
3275 case 2: /* FPU & Logical Operations */
3277 unsigned int xop = GET_FIELD(insn, 7, 12);
3278 TCGv cpu_dst = get_temp_tl(dc);
3279 TCGv cpu_tmp0;
3281 if (xop == 0x3a) { /* generate trap */
3282 int cond = GET_FIELD(insn, 3, 6);
3283 TCGv_i32 trap;
3284 TCGLabel *l1 = NULL;
3285 int mask;
3287 if (cond == 0) {
3288 /* Trap never. */
3289 break;
3292 save_state(dc);
3294 if (cond != 8) {
3295 /* Conditional trap. */
3296 DisasCompare cmp;
3297 #ifdef TARGET_SPARC64
3298 /* V9 icc/xcc */
3299 int cc = GET_FIELD_SP(insn, 11, 12);
3300 if (cc == 0) {
3301 gen_compare(&cmp, 0, cond, dc);
3302 } else if (cc == 2) {
3303 gen_compare(&cmp, 1, cond, dc);
3304 } else {
3305 goto illegal_insn;
3307 #else
3308 gen_compare(&cmp, 0, cond, dc);
3309 #endif
3310 l1 = gen_new_label();
3311 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3312 cmp.c1, cmp.c2, l1);
3313 free_compare(&cmp);
3316 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3317 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
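/* The trap vector ends up as TT_TRAP + ((rs1 + rs2_or_imm) & mask);
   the mask == 0 path below marks a fully constant trap number. */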
3319 /* Don't use the normal temporaries, as they may well have
3320 gone out of scope with the branch above. While we're
3321 doing that we might as well pre-truncate to 32-bit. */
3322 trap = tcg_temp_new_i32();
3324 rs1 = GET_FIELD_SP(insn, 14, 18);
3325 if (IS_IMM) {
3326 rs2 = GET_FIELD_SP(insn, 0, 7);
3327 if (rs1 == 0) {
3328 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3329 /* Signal that the trap value is fully constant. */
3330 mask = 0;
3331 } else {
3332 TCGv t1 = gen_load_gpr(dc, rs1);
3333 tcg_gen_trunc_tl_i32(trap, t1);
3334 tcg_gen_addi_i32(trap, trap, rs2);
3336 } else {
3337 TCGv t1, t2;
3338 rs2 = GET_FIELD_SP(insn, 0, 4);
3339 t1 = gen_load_gpr(dc, rs1);
3340 t2 = gen_load_gpr(dc, rs2);
3341 tcg_gen_add_tl(t1, t1, t2);
3342 tcg_gen_trunc_tl_i32(trap, t1);
3344 if (mask != 0) {
3345 tcg_gen_andi_i32(trap, trap, mask);
3346 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3349 gen_helper_raise_exception(cpu_env, trap);
3350 tcg_temp_free_i32(trap);
3352 if (cond == 8) {
3353 /* An unconditional trap ends the TB. */
3354 dc->is_br = 1;
3355 goto jmp_insn;
3356 } else {
3357 /* A conditional trap falls through to the next insn. */
3358 gen_set_label(l1);
3359 break;
3361 } else if (xop == 0x28) {
3362 rs1 = GET_FIELD(insn, 13, 17);
3363 switch(rs1) {
3364 case 0: /* rdy */
3365 #ifndef TARGET_SPARC64
3366 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3367 manual, rdy on the microSPARC
3368 II */
3369 case 0x0f: /* stbar in the SPARCv8 manual,
3370 rdy on the microSPARC II */
3371 case 0x10 ... 0x1f: /* implementation-dependent in the
3372 SPARCv8 manual, rdy on the
3373 microSPARC II */
3374 /* Read Asr17 */
3375 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3376 TCGv t = gen_dest_gpr(dc, rd);
3377 /* Read Asr17 for a Leon3 monoprocessor */
3378 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3379 gen_store_gpr(dc, rd, t);
3380 break;
3382 #endif
3383 gen_store_gpr(dc, rd, cpu_y);
3384 break;
3385 #ifdef TARGET_SPARC64
3386 case 0x2: /* V9 rdccr */
3387 update_psr(dc);
3388 gen_helper_rdccr(cpu_dst, cpu_env);
3389 gen_store_gpr(dc, rd, cpu_dst);
3390 break;
3391 case 0x3: /* V9 rdasi */
3392 tcg_gen_movi_tl(cpu_dst, dc->asi);
3393 gen_store_gpr(dc, rd, cpu_dst);
3394 break;
3395 case 0x4: /* V9 rdtick */
3397 TCGv_ptr r_tickptr;
3398 TCGv_i32 r_const;
3400 r_tickptr = tcg_temp_new_ptr();
3401 r_const = tcg_const_i32(dc->mem_idx);
3402 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3403 offsetof(CPUSPARCState, tick));
3404 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3405 r_const);
3406 tcg_temp_free_ptr(r_tickptr);
3407 tcg_temp_free_i32(r_const);
3408 gen_store_gpr(dc, rd, cpu_dst);
3410 break;
3411 case 0x5: /* V9 rdpc */
3413 TCGv t = gen_dest_gpr(dc, rd);
3414 if (unlikely(AM_CHECK(dc))) {
3415 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3416 } else {
3417 tcg_gen_movi_tl(t, dc->pc);
3419 gen_store_gpr(dc, rd, t);
3421 break;
3422 case 0x6: /* V9 rdfprs */
3423 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3424 gen_store_gpr(dc, rd, cpu_dst);
3425 break;
3426 case 0xf: /* V9 membar */
3427 break; /* no effect */
3428 case 0x13: /* Graphics Status */
3429 if (gen_trap_ifnofpu(dc)) {
3430 goto jmp_insn;
3432 gen_store_gpr(dc, rd, cpu_gsr);
3433 break;
3434 case 0x16: /* Softint */
3435 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3436 offsetof(CPUSPARCState, softint));
3437 gen_store_gpr(dc, rd, cpu_dst);
3438 break;
3439 case 0x17: /* Tick compare */
3440 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3441 break;
3442 case 0x18: /* System tick */
3444 TCGv_ptr r_tickptr;
3445 TCGv_i32 r_const;
3447 r_tickptr = tcg_temp_new_ptr();
3448 r_const = tcg_const_i32(dc->mem_idx);
3449 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3450 offsetof(CPUSPARCState, stick));
3451 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3452 r_const);
3453 tcg_temp_free_ptr(r_tickptr);
3454 tcg_temp_free_i32(r_const);
3455 gen_store_gpr(dc, rd, cpu_dst);
3457 break;
3458 case 0x19: /* System tick compare */
3459 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3460 break;
3461 case 0x1a: /* UltraSPARC-T1 Strand status */
3462 /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
3463 * this ASR as impl. dep. */
3465 CHECK_IU_FEATURE(dc, HYPV);
3467 TCGv t = gen_dest_gpr(dc, rd);
3468 tcg_gen_movi_tl(t, 1UL);
3469 gen_store_gpr(dc, rd, t);
3471 break;
3472 case 0x10: /* Performance Control */
3473 case 0x11: /* Performance Instrumentation Counter */
3474 case 0x12: /* Dispatch Control */
3475 case 0x14: /* Softint set, WO */
3476 case 0x15: /* Softint clear, WO */
3477 #endif
3478 default:
3479 goto illegal_insn;
3481 #if !defined(CONFIG_USER_ONLY)
3482 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3483 #ifndef TARGET_SPARC64
3484 if (!supervisor(dc)) {
3485 goto priv_insn;
3487 update_psr(dc);
3488 gen_helper_rdpsr(cpu_dst, cpu_env);
3489 #else
3490 CHECK_IU_FEATURE(dc, HYPV);
3491 if (!hypervisor(dc))
3492 goto priv_insn;
3493 rs1 = GET_FIELD(insn, 13, 17);
3494 switch (rs1) {
3495 case 0: // hpstate
3496 tcg_gen_ld_i64(cpu_dst, cpu_env,
3497 offsetof(CPUSPARCState, hpstate));
3498 break;
3499 case 1: // htstate
3500 // gen_op_rdhtstate();
3501 break;
3502 case 3: // hintp
3503 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3504 break;
3505 case 5: // htba
3506 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3507 break;
3508 case 6: // hver
3509 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3510 break;
3511 case 31: // hstick_cmpr
3512 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3513 break;
3514 default:
3515 goto illegal_insn;
3517 #endif
3518 gen_store_gpr(dc, rd, cpu_dst);
3519 break;
3520 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3521 if (!supervisor(dc)) {
3522 goto priv_insn;
3524 cpu_tmp0 = get_temp_tl(dc);
3525 #ifdef TARGET_SPARC64
3526 rs1 = GET_FIELD(insn, 13, 17);
3527 switch (rs1) {
3528 case 0: // tpc
3530 TCGv_ptr r_tsptr;
3532 r_tsptr = tcg_temp_new_ptr();
3533 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3534 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3535 offsetof(trap_state, tpc));
3536 tcg_temp_free_ptr(r_tsptr);
3538 break;
3539 case 1: // tnpc
3541 TCGv_ptr r_tsptr;
3543 r_tsptr = tcg_temp_new_ptr();
3544 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3545 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3546 offsetof(trap_state, tnpc));
3547 tcg_temp_free_ptr(r_tsptr);
3549 break;
3550 case 2: // tstate
3552 TCGv_ptr r_tsptr;
3554 r_tsptr = tcg_temp_new_ptr();
3555 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3556 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3557 offsetof(trap_state, tstate));
3558 tcg_temp_free_ptr(r_tsptr);
3560 break;
3561 case 3: // tt
3563 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3565 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3566 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3567 offsetof(trap_state, tt));
3568 tcg_temp_free_ptr(r_tsptr);
3570 break;
3571 case 4: // tick
3573 TCGv_ptr r_tickptr;
3574 TCGv_i32 r_const;
3576 r_tickptr = tcg_temp_new_ptr();
3577 r_const = tcg_const_i32(dc->mem_idx);
3578 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3579 offsetof(CPUSPARCState, tick));
3580 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3581 r_tickptr, r_const);
3582 tcg_temp_free_ptr(r_tickptr);
3583 tcg_temp_free_i32(r_const);
3585 break;
3586 case 5: // tba
3587 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3588 break;
3589 case 6: // pstate
3590 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3591 offsetof(CPUSPARCState, pstate));
3592 break;
3593 case 7: // tl
3594 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3595 offsetof(CPUSPARCState, tl));
3596 break;
3597 case 8: // pil
3598 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3599 offsetof(CPUSPARCState, psrpil));
3600 break;
3601 case 9: // cwp
3602 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3603 break;
3604 case 10: // cansave
3605 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3606 offsetof(CPUSPARCState, cansave));
3607 break;
3608 case 11: // canrestore
3609 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3610 offsetof(CPUSPARCState, canrestore));
3611 break;
3612 case 12: // cleanwin
3613 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3614 offsetof(CPUSPARCState, cleanwin));
3615 break;
3616 case 13: // otherwin
3617 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3618 offsetof(CPUSPARCState, otherwin));
3619 break;
3620 case 14: // wstate
3621 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3622 offsetof(CPUSPARCState, wstate));
3623 break;
3624 case 16: // UA2005 gl
3625 CHECK_IU_FEATURE(dc, GL);
3626 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3627 offsetof(CPUSPARCState, gl));
3628 break;
3629 case 26: // UA2005 strand status
3630 CHECK_IU_FEATURE(dc, HYPV);
3631 if (!hypervisor(dc))
3632 goto priv_insn;
3633 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3634 break;
3635 case 31: // ver
3636 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3637 break;
3638 case 15: // fq
3639 default:
3640 goto illegal_insn;
3642 #else
3643 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3644 #endif
3645 gen_store_gpr(dc, rd, cpu_tmp0);
3646 break;
3647 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3648 #ifdef TARGET_SPARC64
3649 gen_helper_flushw(cpu_env);
3650 #else
3651 if (!supervisor(dc))
3652 goto priv_insn;
3653 gen_store_gpr(dc, rd, cpu_tbr);
3654 #endif
3655 break;
3656 #endif
3657 } else if (xop == 0x34) { /* FPU Operations */
3658 if (gen_trap_ifnofpu(dc)) {
3659 goto jmp_insn;
3661 gen_op_clear_ieee_excp_and_FTT();
3662 rs1 = GET_FIELD(insn, 13, 17);
3663 rs2 = GET_FIELD(insn, 27, 31);
3664 xop = GET_FIELD(insn, 18, 26);
3666 switch (xop) {
3667 case 0x1: /* fmovs */
3668 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3669 gen_store_fpr_F(dc, rd, cpu_src1_32);
3670 break;
3671 case 0x5: /* fnegs */
3672 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3673 break;
3674 case 0x9: /* fabss */
3675 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3676 break;
3677 case 0x29: /* fsqrts */
3678 CHECK_FPU_FEATURE(dc, FSQRT);
3679 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3680 break;
3681 case 0x2a: /* fsqrtd */
3682 CHECK_FPU_FEATURE(dc, FSQRT);
3683 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3684 break;
3685 case 0x2b: /* fsqrtq */
3686 CHECK_FPU_FEATURE(dc, FLOAT128);
3687 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3688 break;
3689 case 0x41: /* fadds */
3690 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3691 break;
3692 case 0x42: /* faddd */
3693 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3694 break;
3695 case 0x43: /* faddq */
3696 CHECK_FPU_FEATURE(dc, FLOAT128);
3697 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3698 break;
3699 case 0x45: /* fsubs */
3700 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3701 break;
3702 case 0x46: /* fsubd */
3703 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3704 break;
3705 case 0x47: /* fsubq */
3706 CHECK_FPU_FEATURE(dc, FLOAT128);
3707 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3708 break;
3709 case 0x49: /* fmuls */
3710 CHECK_FPU_FEATURE(dc, FMUL);
3711 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3712 break;
3713 case 0x4a: /* fmuld */
3714 CHECK_FPU_FEATURE(dc, FMUL);
3715 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3716 break;
3717 case 0x4b: /* fmulq */
3718 CHECK_FPU_FEATURE(dc, FLOAT128);
3719 CHECK_FPU_FEATURE(dc, FMUL);
3720 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3721 break;
3722 case 0x4d: /* fdivs */
3723 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3724 break;
3725 case 0x4e: /* fdivd */
3726 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3727 break;
3728 case 0x4f: /* fdivq */
3729 CHECK_FPU_FEATURE(dc, FLOAT128);
3730 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3731 break;
3732 case 0x69: /* fsmuld */
3733 CHECK_FPU_FEATURE(dc, FSMULD);
3734 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3735 break;
3736 case 0x6e: /* fdmulq */
3737 CHECK_FPU_FEATURE(dc, FLOAT128);
3738 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3739 break;
3740 case 0xc4: /* fitos */
3741 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3742 break;
3743 case 0xc6: /* fdtos */
3744 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3745 break;
3746 case 0xc7: /* fqtos */
3747 CHECK_FPU_FEATURE(dc, FLOAT128);
3748 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3749 break;
3750 case 0xc8: /* fitod */
3751 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3752 break;
3753 case 0xc9: /* fstod */
3754 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3755 break;
3756 case 0xcb: /* fqtod */
3757 CHECK_FPU_FEATURE(dc, FLOAT128);
3758 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3759 break;
3760 case 0xcc: /* fitoq */
3761 CHECK_FPU_FEATURE(dc, FLOAT128);
3762 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3763 break;
3764 case 0xcd: /* fstoq */
3765 CHECK_FPU_FEATURE(dc, FLOAT128);
3766 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3767 break;
3768 case 0xce: /* fdtoq */
3769 CHECK_FPU_FEATURE(dc, FLOAT128);
3770 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3771 break;
3772 case 0xd1: /* fstoi */
3773 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3774 break;
3775 case 0xd2: /* fdtoi */
3776 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3777 break;
3778 case 0xd3: /* fqtoi */
3779 CHECK_FPU_FEATURE(dc, FLOAT128);
3780 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3781 break;
3782 #ifdef TARGET_SPARC64
3783 case 0x2: /* V9 fmovd */
3784 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3785 gen_store_fpr_D(dc, rd, cpu_src1_64);
3786 break;
3787 case 0x3: /* V9 fmovq */
3788 CHECK_FPU_FEATURE(dc, FLOAT128);
3789 gen_move_Q(dc, rd, rs2);
3790 break;
3791 case 0x6: /* V9 fnegd */
3792 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3793 break;
3794 case 0x7: /* V9 fnegq */
3795 CHECK_FPU_FEATURE(dc, FLOAT128);
3796 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3797 break;
3798 case 0xa: /* V9 fabsd */
3799 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3800 break;
3801 case 0xb: /* V9 fabsq */
3802 CHECK_FPU_FEATURE(dc, FLOAT128);
3803 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3804 break;
3805 case 0x81: /* V9 fstox */
3806 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3807 break;
3808 case 0x82: /* V9 fdtox */
3809 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3810 break;
3811 case 0x83: /* V9 fqtox */
3812 CHECK_FPU_FEATURE(dc, FLOAT128);
3813 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3814 break;
3815 case 0x84: /* V9 fxtos */
3816 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3817 break;
3818 case 0x88: /* V9 fxtod */
3819 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3820 break;
3821 case 0x8c: /* V9 fxtoq */
3822 CHECK_FPU_FEATURE(dc, FLOAT128);
3823 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3824 break;
3825 #endif
3826 default:
3827 goto illegal_insn;
3829 } else if (xop == 0x35) { /* FPU Operations */
3830 #ifdef TARGET_SPARC64
3831 int cond;
3832 #endif
3833 if (gen_trap_ifnofpu(dc)) {
3834 goto jmp_insn;
3836 gen_op_clear_ieee_excp_and_FTT();
3837 rs1 = GET_FIELD(insn, 13, 17);
3838 rs2 = GET_FIELD(insn, 27, 31);
3839 xop = GET_FIELD(insn, 18, 26);
3841 #ifdef TARGET_SPARC64
3842 #define FMOVR(sz) \
3843 do { \
3844 DisasCompare cmp; \
3845 cond = GET_FIELD_SP(insn, 10, 12); \
3846 cpu_src1 = get_src1(dc, insn); \
3847 gen_compare_reg(&cmp, cond, cpu_src1); \
3848 gen_fmov##sz(dc, &cmp, rd, rs2); \
3849 free_compare(&cmp); \
3850 } while (0)
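/* FMOVR: conditional fp move keyed on an integer register value
   (rs1 tested against zero, per the rcond field in bits 10..12). */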
3852 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3853 FMOVR(s);
3854 break;
3855 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3856 FMOVR(d);
3857 break;
3858 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3859 CHECK_FPU_FEATURE(dc, FLOAT128);
3860 FMOVR(q);
3861 break;
3863 #undef FMOVR
3864 #endif
3865 switch (xop) {
3866 #ifdef TARGET_SPARC64
3867 #define FMOVCC(fcc, sz) \
3868 do { \
3869 DisasCompare cmp; \
3870 cond = GET_FIELD_SP(insn, 14, 17); \
3871 gen_fcompare(&cmp, fcc, cond); \
3872 gen_fmov##sz(dc, &cmp, rd, rs2); \
3873 free_compare(&cmp); \
3874 } while (0)
3876 case 0x001: /* V9 fmovscc %fcc0 */
3877 FMOVCC(0, s);
3878 break;
3879 case 0x002: /* V9 fmovdcc %fcc0 */
3880 FMOVCC(0, d);
3881 break;
3882 case 0x003: /* V9 fmovqcc %fcc0 */
3883 CHECK_FPU_FEATURE(dc, FLOAT128);
3884 FMOVCC(0, q);
3885 break;
3886 case 0x041: /* V9 fmovscc %fcc1 */
3887 FMOVCC(1, s);
3888 break;
3889 case 0x042: /* V9 fmovdcc %fcc1 */
3890 FMOVCC(1, d);
3891 break;
3892 case 0x043: /* V9 fmovqcc %fcc1 */
3893 CHECK_FPU_FEATURE(dc, FLOAT128);
3894 FMOVCC(1, q);
3895 break;
3896 case 0x081: /* V9 fmovscc %fcc2 */
3897 FMOVCC(2, s);
3898 break;
3899 case 0x082: /* V9 fmovdcc %fcc2 */
3900 FMOVCC(2, d);
3901 break;
3902 case 0x083: /* V9 fmovqcc %fcc2 */
3903 CHECK_FPU_FEATURE(dc, FLOAT128);
3904 FMOVCC(2, q);
3905 break;
3906 case 0x0c1: /* V9 fmovscc %fcc3 */
3907 FMOVCC(3, s);
3908 break;
3909 case 0x0c2: /* V9 fmovdcc %fcc3 */
3910 FMOVCC(3, d);
3911 break;
3912 case 0x0c3: /* V9 fmovqcc %fcc3 */
3913 CHECK_FPU_FEATURE(dc, FLOAT128);
3914 FMOVCC(3, q);
3915 break;
3916 #undef FMOVCC
3917 #define FMOVCC(xcc, sz) \
3918 do { \
3919 DisasCompare cmp; \
3920 cond = GET_FIELD_SP(insn, 14, 17); \
3921 gen_compare(&cmp, xcc, cond, dc); \
3922 gen_fmov##sz(dc, &cmp, rd, rs2); \
3923 free_compare(&cmp); \
3924 } while (0)
3926 case 0x101: /* V9 fmovscc %icc */
3927 FMOVCC(0, s);
3928 break;
3929 case 0x102: /* V9 fmovdcc %icc */
3930 FMOVCC(0, d);
3931 break;
3932 case 0x103: /* V9 fmovqcc %icc */
3933 CHECK_FPU_FEATURE(dc, FLOAT128);
3934 FMOVCC(0, q);
3935 break;
3936 case 0x181: /* V9 fmovscc %xcc */
3937 FMOVCC(1, s);
3938 break;
3939 case 0x182: /* V9 fmovdcc %xcc */
3940 FMOVCC(1, d);
3941 break;
3942 case 0x183: /* V9 fmovqcc %xcc */
3943 CHECK_FPU_FEATURE(dc, FLOAT128);
3944 FMOVCC(1, q);
3945 break;
3946 #undef FMOVCC
3947 #endif
3948 case 0x51: /* fcmps, V9 %fcc */
3949 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3950 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3951 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3952 break;
3953 case 0x52: /* fcmpd, V9 %fcc */
3954 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3955 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3956 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3957 break;
3958 case 0x53: /* fcmpq, V9 %fcc */
3959 CHECK_FPU_FEATURE(dc, FLOAT128);
3960 gen_op_load_fpr_QT0(QFPREG(rs1));
3961 gen_op_load_fpr_QT1(QFPREG(rs2));
3962 gen_op_fcmpq(rd & 3);
3963 break;
3964 case 0x55: /* fcmpes, V9 %fcc */
3965 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3966 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3967 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3968 break;
3969 case 0x56: /* fcmped, V9 %fcc */
3970 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3971 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3972 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3973 break;
3974 case 0x57: /* fcmpeq, V9 %fcc */
3975 CHECK_FPU_FEATURE(dc, FLOAT128);
3976 gen_op_load_fpr_QT0(QFPREG(rs1));
3977 gen_op_load_fpr_QT1(QFPREG(rs2));
3978 gen_op_fcmpeq(rd & 3);
3979 break;
3980 default:
3981 goto illegal_insn;
3983 } else if (xop == 0x2) {
3984 TCGv dst = gen_dest_gpr(dc, rd);
3985 rs1 = GET_FIELD(insn, 13, 17);
3986 if (rs1 == 0) {
3987 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3988 if (IS_IMM) { /* immediate */
3989 simm = GET_FIELDs(insn, 19, 31);
3990 tcg_gen_movi_tl(dst, simm);
3991 gen_store_gpr(dc, rd, dst);
3992 } else { /* register */
3993 rs2 = GET_FIELD(insn, 27, 31);
3994 if (rs2 == 0) {
3995 tcg_gen_movi_tl(dst, 0);
3996 gen_store_gpr(dc, rd, dst);
3997 } else {
3998 cpu_src2 = gen_load_gpr(dc, rs2);
3999 gen_store_gpr(dc, rd, cpu_src2);
4002 } else {
4003 cpu_src1 = get_src1(dc, insn);
4004 if (IS_IMM) { /* immediate */
4005 simm = GET_FIELDs(insn, 19, 31);
4006 tcg_gen_ori_tl(dst, cpu_src1, simm);
4007 gen_store_gpr(dc, rd, dst);
4008 } else { /* register */
4009 rs2 = GET_FIELD(insn, 27, 31);
4010 if (rs2 == 0) {
4011 /* mov shortcut: or x, %g0, y -> mov x, y */
4012 gen_store_gpr(dc, rd, cpu_src1);
4013 } else {
4014 cpu_src2 = gen_load_gpr(dc, rs2);
4015 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
4016 gen_store_gpr(dc, rd, dst);
4020 #ifdef TARGET_SPARC64
4021 } else if (xop == 0x25) { /* sll, V9 sllx */
4022 cpu_src1 = get_src1(dc, insn);
4023 if (IS_IMM) { /* immediate */
4024 simm = GET_FIELDs(insn, 20, 31);
4025 if (insn & (1 << 12)) {
4026 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
4027 } else {
4028 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
4030 } else { /* register */
4031 rs2 = GET_FIELD(insn, 27, 31);
4032 cpu_src2 = gen_load_gpr(dc, rs2);
4033 cpu_tmp0 = get_temp_tl(dc);
4034 if (insn & (1 << 12)) {
4035 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4036 } else {
4037 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4039 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
4041 gen_store_gpr(dc, rd, cpu_dst);
4042 } else if (xop == 0x26) { /* srl, V9 srlx */
4043 cpu_src1 = get_src1(dc, insn);
4044 if (IS_IMM) { /* immediate */
4045 simm = GET_FIELDs(insn, 20, 31);
4046 if (insn & (1 << 12)) {
4047 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
4048 } else {
4049 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4050 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
4052 } else { /* register */
4053 rs2 = GET_FIELD(insn, 27, 31);
4054 cpu_src2 = gen_load_gpr(dc, rs2);
4055 cpu_tmp0 = get_temp_tl(dc);
4056 if (insn & (1 << 12)) {
4057 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4058 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
4059 } else {
4060 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4061 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4062 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
4065 gen_store_gpr(dc, rd, cpu_dst);
4066 } else if (xop == 0x27) { /* sra, V9 srax */
4067 cpu_src1 = get_src1(dc, insn);
4068 if (IS_IMM) { /* immediate */
4069 simm = GET_FIELDs(insn, 20, 31);
4070 if (insn & (1 << 12)) {
4071 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
4072 } else {
4073 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4074 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
4076 } else { /* register */
4077 rs2 = GET_FIELD(insn, 27, 31);
4078 cpu_src2 = gen_load_gpr(dc, rs2);
4079 cpu_tmp0 = get_temp_tl(dc);
4080 if (insn & (1 << 12)) {
4081 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4082 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
4083 } else {
4084 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4085 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4086 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
4089 gen_store_gpr(dc, rd, cpu_dst);
4090 #endif
4091 } else if (xop < 0x36) {
4092 if (xop < 0x20) {
4093 cpu_src1 = get_src1(dc, insn);
4094 cpu_src2 = get_src2(dc, insn);
4095 switch (xop & ~0x10) {
4096 case 0x0: /* add */
4097 if (xop & 0x10) {
4098 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4099 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4100 dc->cc_op = CC_OP_ADD;
4101 } else {
4102 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4104 break;
4105 case 0x1: /* and */
4106 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
4107 if (xop & 0x10) {
4108 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4109 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4110 dc->cc_op = CC_OP_LOGIC;
4112 break;
4113 case 0x2: /* or */
4114 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
4115 if (xop & 0x10) {
4116 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4117 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4118 dc->cc_op = CC_OP_LOGIC;
4120 break;
4121 case 0x3: /* xor */
4122 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
4123 if (xop & 0x10) {
4124 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4125 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4126 dc->cc_op = CC_OP_LOGIC;
4128 break;
4129 case 0x4: /* sub */
4130 if (xop & 0x10) {
4131 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4132 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
4133 dc->cc_op = CC_OP_SUB;
4134 } else {
4135 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
4137 break;
4138 case 0x5: /* andn */
4139 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
4140 if (xop & 0x10) {
4141 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4142 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4143 dc->cc_op = CC_OP_LOGIC;
4145 break;
4146 case 0x6: /* orn */
4147 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
4148 if (xop & 0x10) {
4149 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4150 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4151 dc->cc_op = CC_OP_LOGIC;
4153 break;
4154 case 0x7: /* xorn */
4155 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
4156 if (xop & 0x10) {
4157 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4158 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4159 dc->cc_op = CC_OP_LOGIC;
4161 break;
4162 case 0x8: /* addx, V9 addc */
4163 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4164 (xop & 0x10));
4165 break;
4166 #ifdef TARGET_SPARC64
4167 case 0x9: /* V9 mulx */
4168 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4169 break;
4170 #endif
4171 case 0xa: /* umul */
4172 CHECK_IU_FEATURE(dc, MUL);
4173 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4174 if (xop & 0x10) {
4175 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4176 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4177 dc->cc_op = CC_OP_LOGIC;
4179 break;
4180 case 0xb: /* smul */
4181 CHECK_IU_FEATURE(dc, MUL);
4182 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4183 if (xop & 0x10) {
4184 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4185 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4186 dc->cc_op = CC_OP_LOGIC;
4188 break;
4189 case 0xc: /* subx, V9 subc */
4190 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4191 (xop & 0x10));
4192 break;
4193 #ifdef TARGET_SPARC64
4194 case 0xd: /* V9 udivx */
4195 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4196 break;
4197 #endif
4198 case 0xe: /* udiv */
4199 CHECK_IU_FEATURE(dc, DIV);
4200 if (xop & 0x10) {
4201 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4202 cpu_src2);
4203 dc->cc_op = CC_OP_DIV;
4204 } else {
4205 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4206 cpu_src2);
4208 break;
4209 case 0xf: /* sdiv */
4210 CHECK_IU_FEATURE(dc, DIV);
4211 if (xop & 0x10) {
4212 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4213 cpu_src2);
4214 dc->cc_op = CC_OP_DIV;
4215 } else {
4216 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4217 cpu_src2);
4219 break;
4220 default:
4221 goto illegal_insn;
4223 gen_store_gpr(dc, rd, cpu_dst);
4224 } else {
4225 cpu_src1 = get_src1(dc, insn);
4226 cpu_src2 = get_src2(dc, insn);
4227 switch (xop) {
4228 case 0x20: /* taddcc */
4229 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4230 gen_store_gpr(dc, rd, cpu_dst);
4231 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4232 dc->cc_op = CC_OP_TADD;
4233 break;
4234 case 0x21: /* tsubcc */
4235 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4236 gen_store_gpr(dc, rd, cpu_dst);
4237 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4238 dc->cc_op = CC_OP_TSUB;
4239 break;
4240 case 0x22: /* taddcctv */
4241 gen_helper_taddcctv(cpu_dst, cpu_env,
4242 cpu_src1, cpu_src2);
4243 gen_store_gpr(dc, rd, cpu_dst);
4244 dc->cc_op = CC_OP_TADDTV;
4245 break;
4246 case 0x23: /* tsubcctv */
4247 gen_helper_tsubcctv(cpu_dst, cpu_env,
4248 cpu_src1, cpu_src2);
4249 gen_store_gpr(dc, rd, cpu_dst);
4250 dc->cc_op = CC_OP_TSUBTV;
4251 break;
4252 case 0x24: /* mulscc */
4253 update_psr(dc);
4254 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4255 gen_store_gpr(dc, rd, cpu_dst);
4256 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4257 dc->cc_op = CC_OP_ADD;
4258 break;
4259 #ifndef TARGET_SPARC64
4260 case 0x25: /* sll */
4261 if (IS_IMM) { /* immediate */
4262 simm = GET_FIELDs(insn, 20, 31);
4263 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4264 } else { /* register */
4265 cpu_tmp0 = get_temp_tl(dc);
4266 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4267 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4269 gen_store_gpr(dc, rd, cpu_dst);
4270 break;
4271 case 0x26: /* srl */
4272 if (IS_IMM) { /* immediate */
4273 simm = GET_FIELDs(insn, 20, 31);
4274 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4275 } else { /* register */
4276 cpu_tmp0 = get_temp_tl(dc);
4277 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4278 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4280 gen_store_gpr(dc, rd, cpu_dst);
4281 break;
4282 case 0x27: /* sra */
4283 if (IS_IMM) { /* immediate */
4284 simm = GET_FIELDs(insn, 20, 31);
4285 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4286 } else { /* register */
4287 cpu_tmp0 = get_temp_tl(dc);
4288 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4289 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4291 gen_store_gpr(dc, rd, cpu_dst);
4292 break;
4293 #endif
4294 case 0x30:
4296 cpu_tmp0 = get_temp_tl(dc);
4297 switch(rd) {
4298 case 0: /* wry */
4299 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4300 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4301 break;
4302 #ifndef TARGET_SPARC64
4303 case 0x01 ... 0x0f: /* undefined in the
4304 SPARCv8 manual, nop
4305 on the microSPARC
4306 II */
4307 case 0x10 ... 0x1f: /* implementation-dependent
4308 in the SPARCv8
4309 manual, nop on the
4310 microSPARC II */
4311 if ((rd == 0x13) && (dc->def->features &
4312 CPU_FEATURE_POWERDOWN)) {
4313 /* LEON3 power-down */
4314 save_state(dc);
4315 gen_helper_power_down(cpu_env);
4317 break;
4318 #else
4319 case 0x2: /* V9 wrccr */
4320 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4321 gen_helper_wrccr(cpu_env, cpu_tmp0);
4322 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4323 dc->cc_op = CC_OP_FLAGS;
4324 break;
4325 case 0x3: /* V9 wrasi */
4326 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4327 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4328 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4329 offsetof(CPUSPARCState, asi));
4330 /* End TB to notice changed ASI. */
4331 save_state(dc);
4332 gen_op_next_insn();
4333 tcg_gen_exit_tb(0);
4334 dc->is_br = 1;
4335 break;
4336 case 0x6: /* V9 wrfprs */
4337 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4338 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4339 dc->fprs_dirty = 0;
4340 save_state(dc);
4341 gen_op_next_insn();
4342 tcg_gen_exit_tb(0);
4343 dc->is_br = 1;
4344 break;
4345 case 0xf: /* V9 sir, nop if user */
4346 #if !defined(CONFIG_USER_ONLY)
4347 if (supervisor(dc)) {
4348 ; // XXX
4350 #endif
4351 break;
4352 case 0x13: /* Graphics Status */
4353 if (gen_trap_ifnofpu(dc)) {
4354 goto jmp_insn;
4356 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4357 break;
4358 case 0x14: /* Softint set */
4359 if (!supervisor(dc))
4360 goto illegal_insn;
4361 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4362 gen_helper_set_softint(cpu_env, cpu_tmp0);
4363 break;
4364 case 0x15: /* Softint clear */
4365 if (!supervisor(dc))
4366 goto illegal_insn;
4367 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4368 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4369 break;
4370 case 0x16: /* Softint write */
4371 if (!supervisor(dc))
4372 goto illegal_insn;
4373 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4374 gen_helper_write_softint(cpu_env, cpu_tmp0);
4375 break;
4376 case 0x17: /* Tick compare */
4377 #if !defined(CONFIG_USER_ONLY)
4378 if (!supervisor(dc))
4379 goto illegal_insn;
4380 #endif
4382 TCGv_ptr r_tickptr;
4384 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4385 cpu_src2);
4386 r_tickptr = tcg_temp_new_ptr();
4387 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4388 offsetof(CPUSPARCState, tick));
4389 gen_helper_tick_set_limit(r_tickptr,
4390 cpu_tick_cmpr);
4391 tcg_temp_free_ptr(r_tickptr);
4393 break;
4394 case 0x18: /* System tick */
4395 #if !defined(CONFIG_USER_ONLY)
4396 if (!supervisor(dc))
4397 goto illegal_insn;
4398 #endif
4400 TCGv_ptr r_tickptr;
4402 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4403 cpu_src2);
4404 r_tickptr = tcg_temp_new_ptr();
4405 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4406 offsetof(CPUSPARCState, stick));
4407 gen_helper_tick_set_count(r_tickptr,
4408 cpu_tmp0);
4409 tcg_temp_free_ptr(r_tickptr);
4411 break;
4412 case 0x19: /* System tick compare */
4413 #if !defined(CONFIG_USER_ONLY)
4414 if (!supervisor(dc))
4415 goto illegal_insn;
4416 #endif
4418 TCGv_ptr r_tickptr;
4420 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4421 cpu_src2);
4422 r_tickptr = tcg_temp_new_ptr();
4423 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4424 offsetof(CPUSPARCState, stick));
4425 gen_helper_tick_set_limit(r_tickptr,
4426 cpu_stick_cmpr);
4427 tcg_temp_free_ptr(r_tickptr);
4429 break;
4431 case 0x10: /* Performance Control */
4432 case 0x11: /* Performance Instrumentation
4433 Counter */
4434 case 0x12: /* Dispatch Control */
4435 #endif
4436 default:
4437 goto illegal_insn;
4440 break;
4441 #if !defined(CONFIG_USER_ONLY)
4442 case 0x31: /* wrpsr, V9 saved, restored */
4444 if (!supervisor(dc))
4445 goto priv_insn;
4446 #ifdef TARGET_SPARC64
4447 switch (rd) {
4448 case 0:
4449 gen_helper_saved(cpu_env);
4450 break;
4451 case 1:
4452 gen_helper_restored(cpu_env);
4453 break;
4454 case 2: /* UA2005 allclean */
4455 case 3: /* UA2005 otherw */
4456 case 4: /* UA2005 normalw */
4457 case 5: /* UA2005 invalw */
4458 // XXX
4459 default:
4460 goto illegal_insn;
4461 }
4462 #else
4463 cpu_tmp0 = get_temp_tl(dc);
4464 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4465 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4466 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4467 dc->cc_op = CC_OP_FLAGS;
4468 save_state(dc);
4469 gen_op_next_insn();
4470 tcg_gen_exit_tb(0);
4471 dc->is_br = 1;
4472 #endif
4473 }
4474 break;
4475 case 0x32: /* wrwim, V9 wrpr */
4476 {
4477 if (!supervisor(dc))
4478 goto priv_insn;
4479 cpu_tmp0 = get_temp_tl(dc);
4480 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4481 #ifdef TARGET_SPARC64
4482 switch (rd) {
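/* For rd 0..3 the target registers (tpc/tnpc/tstate/tt) live in the
   trap_state entry for the current trap level, resolved at run time
   by gen_load_trap_state_at_tl(). */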
4483 case 0: // tpc
4484 {
4485 TCGv_ptr r_tsptr;
4487 r_tsptr = tcg_temp_new_ptr();
4488 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4489 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4490 offsetof(trap_state, tpc));
4491 tcg_temp_free_ptr(r_tsptr);
4492 }
4493 break;
4494 case 1: // tnpc
4495 {
4496 TCGv_ptr r_tsptr;
4498 r_tsptr = tcg_temp_new_ptr();
4499 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4500 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4501 offsetof(trap_state, tnpc));
4502 tcg_temp_free_ptr(r_tsptr);
4503 }
4504 break;
4505 case 2: // tstate
4506 {
4507 TCGv_ptr r_tsptr;
4509 r_tsptr = tcg_temp_new_ptr();
4510 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4511 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4512 offsetof(trap_state,
4513 tstate));
4514 tcg_temp_free_ptr(r_tsptr);
4515 }
4516 break;
4517 case 3: // tt
4518 {
4519 TCGv_ptr r_tsptr;
4521 r_tsptr = tcg_temp_new_ptr();
4522 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4523 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4524 offsetof(trap_state, tt));
4525 tcg_temp_free_ptr(r_tsptr);
4526 }
4527 break;
4528 case 4: // tick
4529 {
4530 TCGv_ptr r_tickptr;
4532 r_tickptr = tcg_temp_new_ptr();
4533 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4534 offsetof(CPUSPARCState, tick));
4535 gen_helper_tick_set_count(r_tickptr,
4536 cpu_tmp0);
4537 tcg_temp_free_ptr(r_tickptr);
4538 }
4539 break;
4540 case 5: // tba
4541 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4542 break;
4543 case 6: // pstate
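/* A PSTATE write can change flags the translator depends on, so the
   state is saved and npc made dynamic to stop static TB chaining here. */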
4544 save_state(dc);
4545 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4546 dc->npc = DYNAMIC_PC;
4547 break;
4548 case 7: // tl
4549 save_state(dc);
4550 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4551 offsetof(CPUSPARCState, tl));
4552 dc->npc = DYNAMIC_PC;
4553 break;
4554 case 8: // pil
4555 gen_helper_wrpil(cpu_env, cpu_tmp0);
4556 break;
4557 case 9: // cwp
4558 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4559 break;
4560 case 10: // cansave
4561 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4562 offsetof(CPUSPARCState,
4563 cansave));
4564 break;
4565 case 11: // canrestore
4566 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4567 offsetof(CPUSPARCState,
4568 canrestore));
4569 break;
4570 case 12: // cleanwin
4571 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4572 offsetof(CPUSPARCState,
4573 cleanwin));
4574 break;
4575 case 13: // otherwin
4576 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4577 offsetof(CPUSPARCState,
4578 otherwin));
4579 break;
4580 case 14: // wstate
4581 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4582 offsetof(CPUSPARCState,
4583 wstate));
4584 break;
4585 case 16: // UA2005 gl
4586 CHECK_IU_FEATURE(dc, GL);
4587 gen_helper_wrgl(cpu_env, cpu_tmp0);
4588 break;
4589 case 26: // UA2005 strand status
4590 CHECK_IU_FEATURE(dc, HYPV);
4591 if (!hypervisor(dc))
4592 goto priv_insn;
4593 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4594 break;
4595 default:
4596 goto illegal_insn;
4597 }
4598 #else
4599 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4600 if (dc->def->nwindows != 32) {
4601 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4602 (1 << dc->def->nwindows) - 1);
4603 }
4604 #endif
4605 }
4606 break;
4607 case 0x33: /* wrtbr, UA2005 wrhpr */
4608 {
4609 #ifndef TARGET_SPARC64
4610 if (!supervisor(dc))
4611 goto priv_insn;
4612 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4613 #else
4614 CHECK_IU_FEATURE(dc, HYPV);
4615 if (!hypervisor(dc))
4616 goto priv_insn;
4617 cpu_tmp0 = get_temp_tl(dc);
4618 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4619 switch (rd) {
4620 case 0: // hpstate
4621 tcg_gen_st_i64(cpu_tmp0, cpu_env,
4622 offsetof(CPUSPARCState,
4623 hpstate));
4624 save_state(dc);
4625 gen_op_next_insn();
4626 tcg_gen_exit_tb(0);
4627 dc->is_br = 1;
4628 break;
4629 case 1: // htstate
4630 // XXX gen_op_wrhtstate();
4631 break;
4632 case 3: // hintp
4633 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4634 break;
4635 case 5: // htba
4636 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4637 break;
4638 case 31: // hstick_cmpr
4639 {
4640 TCGv_ptr r_tickptr;
4642 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4643 r_tickptr = tcg_temp_new_ptr();
4644 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4645 offsetof(CPUSPARCState, hstick));
4646 gen_helper_tick_set_limit(r_tickptr,
4647 cpu_hstick_cmpr);
4648 tcg_temp_free_ptr(r_tickptr);
4649 }
4650 break;
4651 case 6: // hver readonly
4652 default:
4653 goto illegal_insn;
4654 }
4655 #endif
4656 }
4657 break;
4658 #endif
4659 #ifdef TARGET_SPARC64
4660 case 0x2c: /* V9 movcc */
4661 {
4662 int cc = GET_FIELD_SP(insn, 11, 12);
4663 int cond = GET_FIELD_SP(insn, 14, 17);
4664 DisasCompare cmp;
4665 TCGv dst;
4667 if (insn & (1 << 18)) {
4668 if (cc == 0) {
4669 gen_compare(&cmp, 0, cond, dc);
4670 } else if (cc == 2) {
4671 gen_compare(&cmp, 1, cond, dc);
4672 } else {
4673 goto illegal_insn;
4674 }
4675 } else {
4676 gen_fcompare(&cmp, cc, cond);
4677 }
4679 /* The get_src2 above loaded the normal 13-bit
4680 immediate field, not the 11-bit field we have
4681 in movcc. But it did handle the reg case. */
4682 if (IS_IMM) {
4683 simm = GET_FIELD_SPs(insn, 0, 10);
4684 tcg_gen_movi_tl(cpu_src2, simm);
4685 }
4687 dst = gen_load_gpr(dc, rd);
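/* dst is preloaded with the old value of rd; movcond overwrites it with
   cpu_src2 only when the condition holds, so no branch is emitted. */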
4688 tcg_gen_movcond_tl(cmp.cond, dst,
4689 cmp.c1, cmp.c2,
4690 cpu_src2, dst);
4691 free_compare(&cmp);
4692 gen_store_gpr(dc, rd, dst);
4693 break;
4694 }
4695 case 0x2d: /* V9 sdivx */
4696 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4697 gen_store_gpr(dc, rd, cpu_dst);
4698 break;
4699 case 0x2e: /* V9 popc */
4700 tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
4701 gen_store_gpr(dc, rd, cpu_dst);
4702 break;
4703 case 0x2f: /* V9 movr */
4704 {
4705 int cond = GET_FIELD_SP(insn, 10, 12);
4706 DisasCompare cmp;
4707 TCGv dst;
4709 gen_compare_reg(&cmp, cond, cpu_src1);
4711 /* The get_src2 above loaded the normal 13-bit
4712 immediate field, not the 10-bit field we have
4713 in movr. But it did handle the reg case. */
4714 if (IS_IMM) {
4715 simm = GET_FIELD_SPs(insn, 0, 9);
4716 tcg_gen_movi_tl(cpu_src2, simm);
4717 }
4719 dst = gen_load_gpr(dc, rd);
4720 tcg_gen_movcond_tl(cmp.cond, dst,
4721 cmp.c1, cmp.c2,
4722 cpu_src2, dst);
4723 free_compare(&cmp);
4724 gen_store_gpr(dc, rd, dst);
4725 break;
4726 }
4727 #endif
4728 default:
4729 goto illegal_insn;
4730 }
4731 }
4732 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4733 #ifdef TARGET_SPARC64
4734 int opf = GET_FIELD_SP(insn, 5, 13);
4735 rs1 = GET_FIELD(insn, 13, 17);
4736 rs2 = GET_FIELD(insn, 27, 31);
4737 if (gen_trap_ifnofpu(dc)) {
4738 goto jmp_insn;
4739 }
4741 switch (opf) {
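/* For the edge ops below, gen_edge's trailing arguments select the
   element width in bits, whether the condition codes are set (the cc
   forms), and the little-endian ("l") variants. */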
4742 case 0x000: /* VIS I edge8cc */
4743 CHECK_FPU_FEATURE(dc, VIS1);
4744 cpu_src1 = gen_load_gpr(dc, rs1);
4745 cpu_src2 = gen_load_gpr(dc, rs2);
4746 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4747 gen_store_gpr(dc, rd, cpu_dst);
4748 break;
4749 case 0x001: /* VIS II edge8n */
4750 CHECK_FPU_FEATURE(dc, VIS2);
4751 cpu_src1 = gen_load_gpr(dc, rs1);
4752 cpu_src2 = gen_load_gpr(dc, rs2);
4753 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4754 gen_store_gpr(dc, rd, cpu_dst);
4755 break;
4756 case 0x002: /* VIS I edge8lcc */
4757 CHECK_FPU_FEATURE(dc, VIS1);
4758 cpu_src1 = gen_load_gpr(dc, rs1);
4759 cpu_src2 = gen_load_gpr(dc, rs2);
4760 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4761 gen_store_gpr(dc, rd, cpu_dst);
4762 break;
4763 case 0x003: /* VIS II edge8ln */
4764 CHECK_FPU_FEATURE(dc, VIS2);
4765 cpu_src1 = gen_load_gpr(dc, rs1);
4766 cpu_src2 = gen_load_gpr(dc, rs2);
4767 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4768 gen_store_gpr(dc, rd, cpu_dst);
4769 break;
4770 case 0x004: /* VIS I edge16cc */
4771 CHECK_FPU_FEATURE(dc, VIS1);
4772 cpu_src1 = gen_load_gpr(dc, rs1);
4773 cpu_src2 = gen_load_gpr(dc, rs2);
4774 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4775 gen_store_gpr(dc, rd, cpu_dst);
4776 break;
4777 case 0x005: /* VIS II edge16n */
4778 CHECK_FPU_FEATURE(dc, VIS2);
4779 cpu_src1 = gen_load_gpr(dc, rs1);
4780 cpu_src2 = gen_load_gpr(dc, rs2);
4781 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4782 gen_store_gpr(dc, rd, cpu_dst);
4783 break;
4784 case 0x006: /* VIS I edge16lcc */
4785 CHECK_FPU_FEATURE(dc, VIS1);
4786 cpu_src1 = gen_load_gpr(dc, rs1);
4787 cpu_src2 = gen_load_gpr(dc, rs2);
4788 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4789 gen_store_gpr(dc, rd, cpu_dst);
4790 break;
4791 case 0x007: /* VIS II edge16ln */
4792 CHECK_FPU_FEATURE(dc, VIS2);
4793 cpu_src1 = gen_load_gpr(dc, rs1);
4794 cpu_src2 = gen_load_gpr(dc, rs2);
4795 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4796 gen_store_gpr(dc, rd, cpu_dst);
4797 break;
4798 case 0x008: /* VIS I edge32cc */
4799 CHECK_FPU_FEATURE(dc, VIS1);
4800 cpu_src1 = gen_load_gpr(dc, rs1);
4801 cpu_src2 = gen_load_gpr(dc, rs2);
4802 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4803 gen_store_gpr(dc, rd, cpu_dst);
4804 break;
4805 case 0x009: /* VIS II edge32n */
4806 CHECK_FPU_FEATURE(dc, VIS2);
4807 cpu_src1 = gen_load_gpr(dc, rs1);
4808 cpu_src2 = gen_load_gpr(dc, rs2);
4809 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4810 gen_store_gpr(dc, rd, cpu_dst);
4811 break;
4812 case 0x00a: /* VIS I edge32lcc */
4813 CHECK_FPU_FEATURE(dc, VIS1);
4814 cpu_src1 = gen_load_gpr(dc, rs1);
4815 cpu_src2 = gen_load_gpr(dc, rs2);
4816 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4817 gen_store_gpr(dc, rd, cpu_dst);
4818 break;
4819 case 0x00b: /* VIS II edge32ln */
4820 CHECK_FPU_FEATURE(dc, VIS2);
4821 cpu_src1 = gen_load_gpr(dc, rs1);
4822 cpu_src2 = gen_load_gpr(dc, rs2);
4823 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4824 gen_store_gpr(dc, rd, cpu_dst);
4825 break;
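/* array16 and array32 reuse the array8 helper and scale the resulting
   blocked-array index by the element size with a left shift. */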
4826 case 0x010: /* VIS I array8 */
4827 CHECK_FPU_FEATURE(dc, VIS1);
4828 cpu_src1 = gen_load_gpr(dc, rs1);
4829 cpu_src2 = gen_load_gpr(dc, rs2);
4830 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4831 gen_store_gpr(dc, rd, cpu_dst);
4832 break;
4833 case 0x012: /* VIS I array16 */
4834 CHECK_FPU_FEATURE(dc, VIS1);
4835 cpu_src1 = gen_load_gpr(dc, rs1);
4836 cpu_src2 = gen_load_gpr(dc, rs2);
4837 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4838 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4839 gen_store_gpr(dc, rd, cpu_dst);
4840 break;
4841 case 0x014: /* VIS I array32 */
4842 CHECK_FPU_FEATURE(dc, VIS1);
4843 cpu_src1 = gen_load_gpr(dc, rs1);
4844 cpu_src2 = gen_load_gpr(dc, rs2);
4845 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4846 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4847 gen_store_gpr(dc, rd, cpu_dst);
4848 break;
4849 case 0x018: /* VIS I alignaddr */
4850 CHECK_FPU_FEATURE(dc, VIS1);
4851 cpu_src1 = gen_load_gpr(dc, rs1);
4852 cpu_src2 = gen_load_gpr(dc, rs2);
4853 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4854 gen_store_gpr(dc, rd, cpu_dst);
4855 break;
4856 case 0x01a: /* VIS I alignaddrl */
4857 CHECK_FPU_FEATURE(dc, VIS1);
4858 cpu_src1 = gen_load_gpr(dc, rs1);
4859 cpu_src2 = gen_load_gpr(dc, rs2);
4860 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4861 gen_store_gpr(dc, rd, cpu_dst);
4862 break;
4863 case 0x019: /* VIS II bmask */
4864 CHECK_FPU_FEATURE(dc, VIS2);
4865 cpu_src1 = gen_load_gpr(dc, rs1);
4866 cpu_src2 = gen_load_gpr(dc, rs2);
4867 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4868 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4869 gen_store_gpr(dc, rd, cpu_dst);
4870 break;
4871 case 0x020: /* VIS I fcmple16 */
4872 CHECK_FPU_FEATURE(dc, VIS1);
4873 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4874 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4875 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4876 gen_store_gpr(dc, rd, cpu_dst);
4877 break;
4878 case 0x022: /* VIS I fcmpne16 */
4879 CHECK_FPU_FEATURE(dc, VIS1);
4880 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4881 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4882 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4883 gen_store_gpr(dc, rd, cpu_dst);
4884 break;
4885 case 0x024: /* VIS I fcmple32 */
4886 CHECK_FPU_FEATURE(dc, VIS1);
4887 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4888 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4889 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4890 gen_store_gpr(dc, rd, cpu_dst);
4891 break;
4892 case 0x026: /* VIS I fcmpne32 */
4893 CHECK_FPU_FEATURE(dc, VIS1);
4894 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4895 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4896 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4897 gen_store_gpr(dc, rd, cpu_dst);
4898 break;
4899 case 0x028: /* VIS I fcmpgt16 */
4900 CHECK_FPU_FEATURE(dc, VIS1);
4901 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4902 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4903 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4904 gen_store_gpr(dc, rd, cpu_dst);
4905 break;
4906 case 0x02a: /* VIS I fcmpeq16 */
4907 CHECK_FPU_FEATURE(dc, VIS1);
4908 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4909 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4910 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4911 gen_store_gpr(dc, rd, cpu_dst);
4912 break;
4913 case 0x02c: /* VIS I fcmpgt32 */
4914 CHECK_FPU_FEATURE(dc, VIS1);
4915 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4916 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4917 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4918 gen_store_gpr(dc, rd, cpu_dst);
4919 break;
4920 case 0x02e: /* VIS I fcmpeq32 */
4921 CHECK_FPU_FEATURE(dc, VIS1);
4922 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4923 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4924 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4925 gen_store_gpr(dc, rd, cpu_dst);
4926 break;
4927 case 0x031: /* VIS I fmul8x16 */
4928 CHECK_FPU_FEATURE(dc, VIS1);
4929 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4930 break;
4931 case 0x033: /* VIS I fmul8x16au */
4932 CHECK_FPU_FEATURE(dc, VIS1);
4933 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4934 break;
4935 case 0x035: /* VIS I fmul8x16al */
4936 CHECK_FPU_FEATURE(dc, VIS1);
4937 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4938 break;
4939 case 0x036: /* VIS I fmul8sux16 */
4940 CHECK_FPU_FEATURE(dc, VIS1);
4941 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4942 break;
4943 case 0x037: /* VIS I fmul8ulx16 */
4944 CHECK_FPU_FEATURE(dc, VIS1);
4945 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4946 break;
4947 case 0x038: /* VIS I fmuld8sux16 */
4948 CHECK_FPU_FEATURE(dc, VIS1);
4949 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4950 break;
4951 case 0x039: /* VIS I fmuld8ulx16 */
4952 CHECK_FPU_FEATURE(dc, VIS1);
4953 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4954 break;
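/* The pack ops read the GSR scale field, so each receives cpu_gsr,
   either via gen_gsr_fop_DDD or as an explicit helper argument. */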
4955 case 0x03a: /* VIS I fpack32 */
4956 CHECK_FPU_FEATURE(dc, VIS1);
4957 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4958 break;
4959 case 0x03b: /* VIS I fpack16 */
4960 CHECK_FPU_FEATURE(dc, VIS1);
4961 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4962 cpu_dst_32 = gen_dest_fpr_F(dc);
4963 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4964 gen_store_fpr_F(dc, rd, cpu_dst_32);
4965 break;
4966 case 0x03d: /* VIS I fpackfix */
4967 CHECK_FPU_FEATURE(dc, VIS1);
4968 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4969 cpu_dst_32 = gen_dest_fpr_F(dc);
4970 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4971 gen_store_fpr_F(dc, rd, cpu_dst_32);
4972 break;
4973 case 0x03e: /* VIS I pdist */
4974 CHECK_FPU_FEATURE(dc, VIS1);
4975 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4976 break;
4977 case 0x048: /* VIS I faligndata */
4978 CHECK_FPU_FEATURE(dc, VIS1);
4979 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4980 break;
4981 case 0x04b: /* VIS I fpmerge */
4982 CHECK_FPU_FEATURE(dc, VIS1);
4983 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4984 break;
4985 case 0x04c: /* VIS II bshuffle */
4986 CHECK_FPU_FEATURE(dc, VIS2);
4987 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4988 break;
4989 case 0x04d: /* VIS I fexpand */
4990 CHECK_FPU_FEATURE(dc, VIS1);
4991 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4992 break;
4993 case 0x050: /* VIS I fpadd16 */
4994 CHECK_FPU_FEATURE(dc, VIS1);
4995 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4996 break;
4997 case 0x051: /* VIS I fpadd16s */
4998 CHECK_FPU_FEATURE(dc, VIS1);
4999 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
5000 break;
5001 case 0x052: /* VIS I fpadd32 */
5002 CHECK_FPU_FEATURE(dc, VIS1);
5003 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
5004 break;
5005 case 0x053: /* VIS I fpadd32s */
5006 CHECK_FPU_FEATURE(dc, VIS1);
5007 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
5008 break;
5009 case 0x054: /* VIS I fpsub16 */
5010 CHECK_FPU_FEATURE(dc, VIS1);
5011 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
5012 break;
5013 case 0x055: /* VIS I fpsub16s */
5014 CHECK_FPU_FEATURE(dc, VIS1);
5015 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
5016 break;
5017 case 0x056: /* VIS I fpsub32 */
5018 CHECK_FPU_FEATURE(dc, VIS1);
5019 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
5020 break;
5021 case 0x057: /* VIS I fpsub32s */
5022 CHECK_FPU_FEATURE(dc, VIS1);
5023 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
5024 break;
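/* The VIS fill and logical ops below need no helpers: they map directly
   onto TCG's 64-bit (double fpreg) and 32-bit integer operations. */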
5025 case 0x060: /* VIS I fzero */
5026 CHECK_FPU_FEATURE(dc, VIS1);
5027 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5028 tcg_gen_movi_i64(cpu_dst_64, 0);
5029 gen_store_fpr_D(dc, rd, cpu_dst_64);
5030 break;
5031 case 0x061: /* VIS I fzeros */
5032 CHECK_FPU_FEATURE(dc, VIS1);
5033 cpu_dst_32 = gen_dest_fpr_F(dc);
5034 tcg_gen_movi_i32(cpu_dst_32, 0);
5035 gen_store_fpr_F(dc, rd, cpu_dst_32);
5036 break;
5037 case 0x062: /* VIS I fnor */
5038 CHECK_FPU_FEATURE(dc, VIS1);
5039 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
5040 break;
5041 case 0x063: /* VIS I fnors */
5042 CHECK_FPU_FEATURE(dc, VIS1);
5043 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
5044 break;
5045 case 0x064: /* VIS I fandnot2 */
5046 CHECK_FPU_FEATURE(dc, VIS1);
5047 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
5048 break;
5049 case 0x065: /* VIS I fandnot2s */
5050 CHECK_FPU_FEATURE(dc, VIS1);
5051 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5052 break;
5053 case 0x066: /* VIS I fnot2 */
5054 CHECK_FPU_FEATURE(dc, VIS1);
5055 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5056 break;
5057 case 0x067: /* VIS I fnot2s */
5058 CHECK_FPU_FEATURE(dc, VIS1);
5059 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5060 break;
5061 case 0x068: /* VIS I fandnot1 */
5062 CHECK_FPU_FEATURE(dc, VIS1);
5063 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5064 break;
5065 case 0x069: /* VIS I fandnot1s */
5066 CHECK_FPU_FEATURE(dc, VIS1);
5067 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5068 break;
5069 case 0x06a: /* VIS I fnot1 */
5070 CHECK_FPU_FEATURE(dc, VIS1);
5071 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5072 break;
5073 case 0x06b: /* VIS I fnot1s */
5074 CHECK_FPU_FEATURE(dc, VIS1);
5075 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5076 break;
5077 case 0x06c: /* VIS I fxor */
5078 CHECK_FPU_FEATURE(dc, VIS1);
5079 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5080 break;
5081 case 0x06d: /* VIS I fxors */
5082 CHECK_FPU_FEATURE(dc, VIS1);
5083 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5084 break;
5085 case 0x06e: /* VIS I fnand */
5086 CHECK_FPU_FEATURE(dc, VIS1);
5087 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5088 break;
5089 case 0x06f: /* VIS I fnands */
5090 CHECK_FPU_FEATURE(dc, VIS1);
5091 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5092 break;
5093 case 0x070: /* VIS I fand */
5094 CHECK_FPU_FEATURE(dc, VIS1);
5095 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5096 break;
5097 case 0x071: /* VIS I fands */
5098 CHECK_FPU_FEATURE(dc, VIS1);
5099 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5100 break;
5101 case 0x072: /* VIS I fxnor */
5102 CHECK_FPU_FEATURE(dc, VIS1);
5103 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5104 break;
5105 case 0x073: /* VIS I fxnors */
5106 CHECK_FPU_FEATURE(dc, VIS1);
5107 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5108 break;
5109 case 0x074: /* VIS I fsrc1 */
5110 CHECK_FPU_FEATURE(dc, VIS1);
5111 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5112 gen_store_fpr_D(dc, rd, cpu_src1_64);
5113 break;
5114 case 0x075: /* VIS I fsrc1s */
5115 CHECK_FPU_FEATURE(dc, VIS1);
5116 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5117 gen_store_fpr_F(dc, rd, cpu_src1_32);
5118 break;
5119 case 0x076: /* VIS I fornot2 */
5120 CHECK_FPU_FEATURE(dc, VIS1);
5121 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5122 break;
5123 case 0x077: /* VIS I fornot2s */
5124 CHECK_FPU_FEATURE(dc, VIS1);
5125 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5126 break;
5127 case 0x078: /* VIS I fsrc2 */
5128 CHECK_FPU_FEATURE(dc, VIS1);
5129 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5130 gen_store_fpr_D(dc, rd, cpu_src1_64);
5131 break;
5132 case 0x079: /* VIS I fsrc2s */
5133 CHECK_FPU_FEATURE(dc, VIS1);
5134 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5135 gen_store_fpr_F(dc, rd, cpu_src1_32);
5136 break;
5137 case 0x07a: /* VIS I fornot1 */
5138 CHECK_FPU_FEATURE(dc, VIS1);
5139 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5140 break;
5141 case 0x07b: /* VIS I fornot1s */
5142 CHECK_FPU_FEATURE(dc, VIS1);
5143 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5144 break;
5145 case 0x07c: /* VIS I for */
5146 CHECK_FPU_FEATURE(dc, VIS1);
5147 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5148 break;
5149 case 0x07d: /* VIS I fors */
5150 CHECK_FPU_FEATURE(dc, VIS1);
5151 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5152 break;
5153 case 0x07e: /* VIS I fone */
5154 CHECK_FPU_FEATURE(dc, VIS1);
5155 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5156 tcg_gen_movi_i64(cpu_dst_64, -1);
5157 gen_store_fpr_D(dc, rd, cpu_dst_64);
5158 break;
5159 case 0x07f: /* VIS I fones */
5160 CHECK_FPU_FEATURE(dc, VIS1);
5161 cpu_dst_32 = gen_dest_fpr_F(dc);
5162 tcg_gen_movi_i32(cpu_dst_32, -1);
5163 gen_store_fpr_F(dc, rd, cpu_dst_32);
5164 break;
5165 case 0x080: /* VIS I shutdown */
5166 case 0x081: /* VIS II siam */
5167 // XXX
5168 goto illegal_insn;
5169 default:
5170 goto illegal_insn;
5171 }
5172 #else
5173 goto ncp_insn;
5174 #endif
5175 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5176 #ifdef TARGET_SPARC64
5177 goto illegal_insn;
5178 #else
5179 goto ncp_insn;
5180 #endif
5181 #ifdef TARGET_SPARC64
5182 } else if (xop == 0x39) { /* V9 return */
5183 save_state(dc);
5184 cpu_src1 = get_src1(dc, insn);
5185 cpu_tmp0 = get_temp_tl(dc);
5186 if (IS_IMM) { /* immediate */
5187 simm = GET_FIELDs(insn, 19, 31);
5188 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5189 } else { /* register */
5190 rs2 = GET_FIELD(insn, 27, 31);
5191 if (rs2) {
5192 cpu_src2 = gen_load_gpr(dc, rs2);
5193 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5194 } else {
5195 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5196 }
5197 }
5198 gen_helper_restore(cpu_env);
5199 gen_mov_pc_npc(dc);
5200 gen_check_align(cpu_tmp0, 3);
5201 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5202 dc->npc = DYNAMIC_PC;
5203 goto jmp_insn;
5204 #endif
5205 } else {
5206 cpu_src1 = get_src1(dc, insn);
5207 cpu_tmp0 = get_temp_tl(dc);
5208 if (IS_IMM) { /* immediate */
5209 simm = GET_FIELDs(insn, 19, 31);
5210 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5211 } else { /* register */
5212 rs2 = GET_FIELD(insn, 27, 31);
5213 if (rs2) {
5214 cpu_src2 = gen_load_gpr(dc, rs2);
5215 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5216 } else {
5217 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5218 }
5219 }
5220 switch (xop) {
5221 case 0x38: /* jmpl */
5222 {
5223 TCGv t = gen_dest_gpr(dc, rd);
5224 tcg_gen_movi_tl(t, dc->pc);
5225 gen_store_gpr(dc, rd, t);
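/* The link value stored above is the address of the jmpl itself (dc->pc);
   the jump target was already computed into cpu_tmp0 before this switch. */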
5227 gen_mov_pc_npc(dc);
5228 gen_check_align(cpu_tmp0, 3);
5229 gen_address_mask(dc, cpu_tmp0);
5230 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5231 dc->npc = DYNAMIC_PC;
5232 }
5233 goto jmp_insn;
5234 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5235 case 0x39: /* rett, V9 return */
5236 {
5237 if (!supervisor(dc))
5238 goto priv_insn;
5239 gen_mov_pc_npc(dc);
5240 gen_check_align(cpu_tmp0, 3);
5241 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5242 dc->npc = DYNAMIC_PC;
5243 gen_helper_rett(cpu_env);
5244 }
5245 goto jmp_insn;
5246 #endif
5247 case 0x3b: /* flush */
5248 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5249 goto unimp_flush;
5250 /* nop */
5251 break;
5252 case 0x3c: /* save */
5253 gen_helper_save(cpu_env);
5254 gen_store_gpr(dc, rd, cpu_tmp0);
5255 break;
5256 case 0x3d: /* restore */
5257 gen_helper_restore(cpu_env);
5258 gen_store_gpr(dc, rd, cpu_tmp0);
5259 break;
5260 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5261 case 0x3e: /* V9 done/retry */
5262 {
5263 switch (rd) {
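/* done resumes after the trapped instruction, retry re-executes it;
   both reload pc/npc from the trap state, so neither target is static. */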
5264 case 0:
5265 if (!supervisor(dc))
5266 goto priv_insn;
5267 dc->npc = DYNAMIC_PC;
5268 dc->pc = DYNAMIC_PC;
5269 gen_helper_done(cpu_env);
5270 goto jmp_insn;
5271 case 1:
5272 if (!supervisor(dc))
5273 goto priv_insn;
5274 dc->npc = DYNAMIC_PC;
5275 dc->pc = DYNAMIC_PC;
5276 gen_helper_retry(cpu_env);
5277 goto jmp_insn;
5278 default:
5279 goto illegal_insn;
5280 }
5281 }
5282 break;
5283 #endif
5284 default:
5285 goto illegal_insn;
5286 }
5287 }
5288 break;
5289 }
5290 break;
5291 case 3: /* load/store instructions */
5292 {
5293 unsigned int xop = GET_FIELD(insn, 7, 12);
5294 /* ??? gen_address_mask prevents us from using a source
5295 register directly. Always generate a temporary. */
5296 TCGv cpu_addr = get_temp_tl(dc);
5298 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5299 if (xop == 0x3c || xop == 0x3e) {
5300 /* V9 casa/casxa : no offset */
5301 } else if (IS_IMM) { /* immediate */
5302 simm = GET_FIELDs(insn, 19, 31);
5303 if (simm != 0) {
5304 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5305 }
5306 } else { /* register */
5307 rs2 = GET_FIELD(insn, 27, 31);
5308 if (rs2 != 0) {
5309 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5310 }
5311 }
5312 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5313 (xop > 0x17 && xop <= 0x1d ) ||
5314 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5315 TCGv cpu_val = gen_dest_gpr(dc, rd);
5317 switch (xop) {
5318 case 0x0: /* ld, V9 lduw, load unsigned word */
5319 gen_address_mask(dc, cpu_addr);
5320 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
5321 break;
5322 case 0x1: /* ldub, load unsigned byte */
5323 gen_address_mask(dc, cpu_addr);
5324 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
5325 break;
5326 case 0x2: /* lduh, load unsigned halfword */
5327 gen_address_mask(dc, cpu_addr);
5328 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
5329 break;
5330 case 0x3: /* ldd, load double word */
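/* ldd fills an even/odd register pair from one 64-bit load: the
   doubleword's low half goes to rd + 1 here, its high half to rd via
   the common gen_store_gpr at the end of the switch. */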
5331 if (rd & 1)
5332 goto illegal_insn;
5333 else {
5334 TCGv_i64 t64;
5336 gen_address_mask(dc, cpu_addr);
5337 t64 = tcg_temp_new_i64();
5338 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
5339 tcg_gen_trunc_i64_tl(cpu_val, t64);
5340 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5341 gen_store_gpr(dc, rd + 1, cpu_val);
5342 tcg_gen_shri_i64(t64, t64, 32);
5343 tcg_gen_trunc_i64_tl(cpu_val, t64);
5344 tcg_temp_free_i64(t64);
5345 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5346 }
5347 break;
5348 case 0x9: /* ldsb, load signed byte */
5349 gen_address_mask(dc, cpu_addr);
5350 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
5351 break;
5352 case 0xa: /* ldsh, load signed halfword */
5353 gen_address_mask(dc, cpu_addr);
5354 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
5355 break;
5356 case 0xd: /* ldstub */
5357 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
5358 break;
5359 case 0x0f:
5360 /* swap, swap register with memory; performed atomically */
5361 CHECK_IU_FEATURE(dc, SWAP);
5362 cpu_src1 = gen_load_gpr(dc, rd);
5363 gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
5364 dc->mem_idx, MO_TEUL);
5365 break;
5366 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5367 case 0x10: /* lda, V9 lduwa, load word alternate */
5368 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5369 break;
5370 case 0x11: /* lduba, load unsigned byte alternate */
5371 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5372 break;
5373 case 0x12: /* lduha, load unsigned halfword alternate */
5374 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5375 break;
5376 case 0x13: /* ldda, load double word alternate */
5377 if (rd & 1) {
5378 goto illegal_insn;
5379 }
5380 gen_ldda_asi(dc, cpu_addr, insn, rd);
5381 goto skip_move;
5382 case 0x19: /* ldsba, load signed byte alternate */
5383 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5384 break;
5385 case 0x1a: /* ldsha, load signed halfword alternate */
5386 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5387 break;
5388 case 0x1d: /* ldstuba -- XXX: should be atomic */
5389 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5390 break;
5391 case 0x1f: /* swapa, swap reg with alt. memory; performed
5392 atomically */
5393 CHECK_IU_FEATURE(dc, SWAP);
5394 cpu_src1 = gen_load_gpr(dc, rd);
5395 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5396 break;
5398 #ifndef TARGET_SPARC64
5399 case 0x30: /* ldc */
5400 case 0x31: /* ldcsr */
5401 case 0x33: /* lddc */
5402 goto ncp_insn;
5403 #endif
5404 #endif
5405 #ifdef TARGET_SPARC64
5406 case 0x08: /* V9 ldsw */
5407 gen_address_mask(dc, cpu_addr);
5408 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
5409 break;
5410 case 0x0b: /* V9 ldx */
5411 gen_address_mask(dc, cpu_addr);
5412 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
5413 break;
5414 case 0x18: /* V9 ldswa */
5415 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5416 break;
5417 case 0x1b: /* V9 ldxa */
5418 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5419 break;
5420 case 0x2d: /* V9 prefetch, no effect */
5421 goto skip_move;
5422 case 0x30: /* V9 ldfa */
5423 if (gen_trap_ifnofpu(dc)) {
5424 goto jmp_insn;
5425 }
5426 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5427 gen_update_fprs_dirty(dc, rd);
5428 goto skip_move;
5429 case 0x33: /* V9 lddfa */
5430 if (gen_trap_ifnofpu(dc)) {
5431 goto jmp_insn;
5432 }
5433 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5434 gen_update_fprs_dirty(dc, DFPREG(rd));
5435 goto skip_move;
5436 case 0x3d: /* V9 prefetcha, no effect */
5437 goto skip_move;
5438 case 0x32: /* V9 ldqfa */
5439 CHECK_FPU_FEATURE(dc, FLOAT128);
5440 if (gen_trap_ifnofpu(dc)) {
5441 goto jmp_insn;
5442 }
5443 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5444 gen_update_fprs_dirty(dc, QFPREG(rd));
5445 goto skip_move;
5446 #endif
5447 default:
5448 goto illegal_insn;
5449 }
5450 gen_store_gpr(dc, rd, cpu_val);
5451 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5452 skip_move: ;
5453 #endif
5454 } else if (xop >= 0x20 && xop < 0x24) {
5455 if (gen_trap_ifnofpu(dc)) {
5456 goto jmp_insn;
5457 }
5458 switch (xop) {
5459 case 0x20: /* ldf, load fpreg */
5460 gen_address_mask(dc, cpu_addr);
5461 cpu_dst_32 = gen_dest_fpr_F(dc);
5462 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5463 dc->mem_idx, MO_TEUL);
5464 gen_store_fpr_F(dc, rd, cpu_dst_32);
5465 break;
5466 case 0x21: /* ldfsr, V9 ldxfsr */
5467 #ifdef TARGET_SPARC64
5468 gen_address_mask(dc, cpu_addr);
5469 if (rd == 1) {
5470 TCGv_i64 t64 = tcg_temp_new_i64();
5471 tcg_gen_qemu_ld_i64(t64, cpu_addr,
5472 dc->mem_idx, MO_TEQ);
5473 gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5474 tcg_temp_free_i64(t64);
5475 break;
5476 }
5477 #endif
5478 cpu_dst_32 = get_temp_i32(dc);
5479 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
5480 dc->mem_idx, MO_TEUL);
5481 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5482 break;
5483 case 0x22: /* ldqf, load quad fpreg */
5484 CHECK_FPU_FEATURE(dc, FLOAT128);
5485 gen_address_mask(dc, cpu_addr);
5486 cpu_src1_64 = tcg_temp_new_i64();
5487 tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5488 MO_TEQ | MO_ALIGN_4);
5489 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5490 cpu_src2_64 = tcg_temp_new_i64();
5491 tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
5492 MO_TEQ | MO_ALIGN_4);
5493 gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
5494 tcg_temp_free_i64(cpu_src1_64);
5495 tcg_temp_free_i64(cpu_src2_64);
5496 break;
5497 case 0x23: /* lddf, load double fpreg */
5498 gen_address_mask(dc, cpu_addr);
5499 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5500 tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
5501 MO_TEQ | MO_ALIGN_4);
5502 gen_store_fpr_D(dc, rd, cpu_dst_64);
5503 break;
5504 default:
5505 goto illegal_insn;
5506 }
5507 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5508 xop == 0xe || xop == 0x1e) {
5509 TCGv cpu_val = gen_load_gpr(dc, rd);
5511 switch (xop) {
5512 case 0x4: /* st, store word */
5513 gen_address_mask(dc, cpu_addr);
5514 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
5515 break;
5516 case 0x5: /* stb, store byte */
5517 gen_address_mask(dc, cpu_addr);
5518 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
5519 break;
5520 case 0x6: /* sth, store halfword */
5521 gen_address_mask(dc, cpu_addr);
5522 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
5523 break;
5524 case 0x7: /* std, store double word */
5525 if (rd & 1)
5526 goto illegal_insn;
5527 else {
5528 TCGv_i64 t64;
5529 TCGv lo;
5531 gen_address_mask(dc, cpu_addr);
5532 lo = gen_load_gpr(dc, rd + 1);
5533 t64 = tcg_temp_new_i64();
5534 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5535 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
5536 tcg_temp_free_i64(t64);
5537 }
5538 break;
5539 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5540 case 0x14: /* sta, V9 stwa, store word alternate */
5541 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5542 break;
5543 case 0x15: /* stba, store byte alternate */
5544 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5545 break;
5546 case 0x16: /* stha, store halfword alternate */
5547 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5548 break;
5549 case 0x17: /* stda, store double word alternate */
5550 if (rd & 1) {
5551 goto illegal_insn;
5552 }
5553 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5554 break;
5555 #endif
5556 #ifdef TARGET_SPARC64
5557 case 0x0e: /* V9 stx */
5558 gen_address_mask(dc, cpu_addr);
5559 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5560 break;
5561 case 0x1e: /* V9 stxa */
5562 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5563 break;
5564 #endif
5565 default:
5566 goto illegal_insn;
5567 }
5568 } else if (xop > 0x23 && xop < 0x28) {
5569 if (gen_trap_ifnofpu(dc)) {
5570 goto jmp_insn;
5571 }
5572 switch (xop) {
5573 case 0x24: /* stf, store fpreg */
5574 gen_address_mask(dc, cpu_addr);
5575 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5576 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
5577 dc->mem_idx, MO_TEUL);
5578 break;
5579 case 0x25: /* stfsr, V9 stxfsr */
5580 {
5581 #ifdef TARGET_SPARC64
5582 gen_address_mask(dc, cpu_addr);
5583 if (rd == 1) {
5584 tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
5585 break;
5586 }
5587 #endif
5588 tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
5589 }
5590 break;
5591 case 0x26:
5592 #ifdef TARGET_SPARC64
5593 /* V9 stqf, store quad fpreg */
5594 CHECK_FPU_FEATURE(dc, FLOAT128);
5595 gen_address_mask(dc, cpu_addr);
5596 /* ??? While stqf only requires 4-byte alignment, it is
5597 legal for the cpu to signal the unaligned exception.
5598 The OS trap handler is then required to fix it up.
5599 For qemu, this avoids having to probe the second page
5600 before performing the first write. */
5601 cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
5602 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
5603 dc->mem_idx, MO_TEQ | MO_ALIGN_16);
5604 tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
5605 cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
5606 tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
5607 dc->mem_idx, MO_TEQ);
5608 break;
5609 #else /* !TARGET_SPARC64 */
5610 /* stdfq, store floating point queue */
5611 #if defined(CONFIG_USER_ONLY)
5612 goto illegal_insn;
5613 #else
5614 if (!supervisor(dc))
5615 goto priv_insn;
5616 if (gen_trap_ifnofpu(dc)) {
5617 goto jmp_insn;
5618 }
5619 goto nfq_insn;
5620 #endif
5621 #endif
5622 case 0x27: /* stdf, store double fpreg */
5623 gen_address_mask(dc, cpu_addr);
5624 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5625 tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
5626 MO_TEQ | MO_ALIGN_4);
5627 break;
5628 default:
5629 goto illegal_insn;
5630 }
5631 } else if (xop > 0x33 && xop < 0x3f) {
5632 switch (xop) {
5633 #ifdef TARGET_SPARC64
5634 case 0x34: /* V9 stfa */
5635 if (gen_trap_ifnofpu(dc)) {
5636 goto jmp_insn;
5638 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5639 break;
5640 case 0x36: /* V9 stqfa */
5641 {
5642 CHECK_FPU_FEATURE(dc, FLOAT128);
5643 if (gen_trap_ifnofpu(dc)) {
5644 goto jmp_insn;
5645 }
5646 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5647 }
5648 break;
5649 case 0x37: /* V9 stdfa */
5650 if (gen_trap_ifnofpu(dc)) {
5651 goto jmp_insn;
5652 }
5653 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5654 break;
5655 case 0x3e: /* V9 casxa */
5656 rs2 = GET_FIELD(insn, 27, 31);
5657 cpu_src2 = gen_load_gpr(dc, rs2);
5658 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5659 break;
5660 #else
5661 case 0x34: /* stc */
5662 case 0x35: /* stcsr */
5663 case 0x36: /* stdcq */
5664 case 0x37: /* stdc */
5665 goto ncp_insn;
5666 #endif
5667 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
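/* casa is nominally V9-only, but LEON3 provides it on sparc32 as well,
   hence a CASA feature check below rather than a blanket #ifdef. */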
5668 case 0x3c: /* V9 or LEON3 casa */
5669 #ifndef TARGET_SPARC64
5670 CHECK_IU_FEATURE(dc, CASA);
5671 #endif
5672 rs2 = GET_FIELD(insn, 27, 31);
5673 cpu_src2 = gen_load_gpr(dc, rs2);
5674 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5675 break;
5676 #endif
5677 default:
5678 goto illegal_insn;
5679 }
5680 } else {
5681 goto illegal_insn;
5682 }
5683 }
5684 break;
5685 }
5686 /* default case for non-jump instructions */
5687 if (dc->npc == DYNAMIC_PC) {
5688 dc->pc = DYNAMIC_PC;
5689 gen_op_next_insn();
5690 } else if (dc->npc == JUMP_PC) {
5691 /* we can do a static jump */
5692 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5693 dc->is_br = 1;
5694 } else {
5695 dc->pc = dc->npc;
5696 dc->npc = dc->npc + 4;
5697 }
5698 jmp_insn:
5699 goto egress;
5700 illegal_insn:
5701 gen_exception(dc, TT_ILL_INSN);
5702 goto egress;
5703 unimp_flush:
5704 gen_exception(dc, TT_UNIMP_FLUSH);
5705 goto egress;
5706 #if !defined(CONFIG_USER_ONLY)
5707 priv_insn:
5708 gen_exception(dc, TT_PRIV_INSN);
5709 goto egress;
5710 #endif
5711 nfpu_insn:
5712 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5713 goto egress;
5714 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5715 nfq_insn:
5716 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5717 goto egress;
5718 #endif
5719 #ifndef TARGET_SPARC64
5720 ncp_insn:
5721 gen_exception(dc, TT_NCP_INSN);
5722 goto egress;
5723 #endif
5724 egress:
5725 if (dc->n_t32 != 0) {
5726 int i;
5727 for (i = dc->n_t32 - 1; i >= 0; --i) {
5728 tcg_temp_free_i32(dc->t32[i]);
5729 }
5730 dc->n_t32 = 0;
5731 }
5732 if (dc->n_ttl != 0) {
5733 int i;
5734 for (i = dc->n_ttl - 1; i >= 0; --i) {
5735 tcg_temp_free(dc->ttl[i]);
5736 }
5737 dc->n_ttl = 0;
5738 }
5739 }
5741 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
5742 {
5743 CPUSPARCState *env = cs->env_ptr;
5744 target_ulong pc_start, last_pc;
5745 DisasContext dc1, *dc = &dc1;
5746 int num_insns;
5747 int max_insns;
5748 unsigned int insn;
5750 memset(dc, 0, sizeof(DisasContext));
5751 dc->tb = tb;
5752 pc_start = tb->pc;
5753 dc->pc = pc_start;
5754 last_pc = dc->pc;
5755 dc->npc = (target_ulong) tb->cs_base;
5756 dc->cc_op = CC_OP_DYNAMIC;
5757 dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
5758 dc->def = &env->def;
5759 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5760 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5761 dc->singlestep = (cs->singlestep_enabled || singlestep);
5762 #ifndef CONFIG_USER_ONLY
5763 dc->supervisor = (tb->flags & TB_FLAG_SUPER) != 0;
5764 #endif
5765 #ifdef TARGET_SPARC64
5766 dc->fprs_dirty = 0;
5767 dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5768 #ifndef CONFIG_USER_ONLY
5769 dc->hypervisor = (tb->flags & TB_FLAG_HYPER) != 0;
5770 #endif
5771 #endif
5773 num_insns = 0;
5774 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
5775 if (max_insns == 0) {
5776 max_insns = CF_COUNT_MASK;
5777 }
5778 if (max_insns > TCG_MAX_INSNS) {
5779 max_insns = TCG_MAX_INSNS;
5780 }
5782 gen_tb_start(tb);
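/* Each iteration records a (pc, npc) pair via tcg_gen_insn_start();
   restore_state_to_opc() below consumes exactly these two slots. */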
5783 do {
5784 if (dc->npc & JUMP_PC) {
5785 assert(dc->jump_pc[1] == dc->pc + 4);
5786 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5787 } else {
5788 tcg_gen_insn_start(dc->pc, dc->npc);
5789 }
5790 num_insns++;
5791 last_pc = dc->pc;
5793 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5794 if (dc->pc != pc_start) {
5795 save_state(dc);
5796 }
5797 gen_helper_debug(cpu_env);
5798 tcg_gen_exit_tb(0);
5799 dc->is_br = 1;
5800 goto exit_gen_loop;
5801 }
5803 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
5804 gen_io_start();
5805 }
5807 insn = cpu_ldl_code(env, dc->pc);
5809 disas_sparc_insn(dc, insn);
5811 if (dc->is_br)
5812 break;
5813 /* if the next PC is different, we abort now */
5814 if (dc->pc != (last_pc + 4))
5815 break;
5816 /* if we reach a page boundary, we stop generation so that the
5817 PC of a TT_TFAULT exception is always in the right page */
5818 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5819 break;
5820 /* if single step mode, we generate only one instruction and
5821 raise an exception after it */
5822 if (dc->singlestep) {
5823 break;
5824 }
5825 } while (!tcg_op_buf_full() &&
5826 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5827 num_insns < max_insns);
5829 exit_gen_loop:
5830 if (tb_cflags(tb) & CF_LAST_IO) {
5831 gen_io_end();
5832 }
5833 if (!dc->is_br) {
5834 if (dc->pc != DYNAMIC_PC &&
5835 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5836 /* static PC and NPC: we can use direct chaining */
5837 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5838 } else {
5839 if (dc->pc != DYNAMIC_PC) {
5840 tcg_gen_movi_tl(cpu_pc, dc->pc);
5841 }
5842 save_npc(dc);
5843 tcg_gen_exit_tb(0);
5844 }
5845 }
5846 gen_tb_end(tb, num_insns);
5848 tb->size = last_pc + 4 - pc_start;
5849 tb->icount = num_insns;
5851 #ifdef DEBUG_DISAS
5852 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5853 && qemu_log_in_addr_range(pc_start)) {
5854 qemu_log_lock();
5855 qemu_log("--------------\n");
5856 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5857 log_target_disas(cs, pc_start, last_pc + 4 - pc_start);
5858 qemu_log("\n");
5859 qemu_log_unlock();
5860 }
5861 #endif
5862 }
5864 void sparc_tcg_init(void)
5865 {
5866 static const char gregnames[32][4] = {
5867 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5868 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5869 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5870 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5872 static const char fregnames[32][4] = {
5873 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5874 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5875 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5876 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5879 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5880 #ifdef TARGET_SPARC64
5881 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5882 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5883 #else
5884 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5885 #endif
5886 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5887 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5888 };
5890 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5891 #ifdef TARGET_SPARC64
5892 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5893 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5894 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5895 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5896 "hstick_cmpr" },
5897 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5898 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5899 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5900 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5901 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5902 #endif
5903 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5904 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5905 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5906 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5907 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5908 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5909 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5910 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5911 #ifndef CONFIG_USER_ONLY
5912 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5913 #endif
5914 };
5916 unsigned int i;
5918 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5919 offsetof(CPUSPARCState, regwptr),
5920 "regwptr");
5922 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5923 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5924 }
5926 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5927 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5928 }
5930 cpu_regs[0] = NULL;
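/* %g0 reads as zero and ignores writes, so no TCG global backs it; the
   gpr load/store helpers are expected to special-case the NULL entry. */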
5931 for (i = 1; i < 8; ++i) {
5932 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5933 offsetof(CPUSPARCState, gregs[i]),
5934 gregnames[i]);
5935 }
5937 for (i = 8; i < 32; ++i) {
5938 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5939 (i - 8) * sizeof(target_ulong),
5940 gregnames[i]);
5941 }
5943 for (i = 0; i < TARGET_DPREGS; i++) {
5944 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5945 offsetof(CPUSPARCState, fpr[i]),
5946 fregnames[i]);
5947 }
5948 }
5950 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5951 target_ulong *data)
5952 {
5953 target_ulong pc = data[0];
5954 target_ulong npc = data[1];
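/* npc encodes three cases: DYNAMIC_PC (value already stored by the TB),
   a conditional branch target tagged with the JUMP_PC bit, or a plain
   next-PC value. */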
5956 env->pc = pc;
5957 if (npc == DYNAMIC_PC) {
5958 /* dynamic NPC: already stored */
5959 } else if (npc & JUMP_PC) {
5960 /* jump PC: use 'cond' and the jump targets of the translation */
5961 if (env->cond) {
5962 env->npc = npc & ~3;
5963 } else {
5964 env->npc = pc + 4;
5965 }
5966 } else {
5967 env->npc = npc;
5968 }
5969 }