/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#include "asi.h"

#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */

/* global register indexes */
static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"

typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
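/* Illustrative note (added for clarity, not in the original source):
   GET_FIELD numbers bits big-endian, so the 4-bit branch condition field
   at machine bits 28..25 is GET_FIELD(insn, 3, 6), which expands to
   (insn >> (31 - 6)) & ((1 << 4) - 1) == (insn >> 25) & 0xf.
   GET_FIELD_SP uses the manuals' little-endian numbering instead. */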
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
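/* Illustrative note (added for clarity, not in the original source):
   on SPARC64 the odd bit of the encoded register number selects the
   upper bank of double registers, e.g. %d32 is encoded as rd = 1 and
   DFPREG(1) == ((1 & 1) << 5) | (1 & 0x1e) == 32, while DFPREG(2) == 2
   stays %d2. */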
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
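/* Illustrative note (added for clarity, not in the original source):
   sign_extend widens a len-bit two's-complement value by shifting it to
   the top of the word and arithmetic-shifting back down, e.g.
   sign_extend(0x1f, 5) == -1 and sign_extend(0x0f, 5) == 15. */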
#define IS_IMM (insn & (1<<13))

static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

#ifdef TARGET_SPARC64
static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#else
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}

static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}

static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
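/* Note (added for clarity, not in the original source): the value passed
   to tcg_gen_exit_tb() is the TB pointer with the goto_tb slot index in
   its low bits; the TCG engine uses it to patch the direct jump once the
   destination TB is translated, so chained TBs bypass the main execution
   loop.  Exiting with 0 always returns to the loop. */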
// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
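/* Illustrative note (added for clarity, not in the original source):
   after a previous SUBcc computed cc_dst = cc_src - cc_src2, the borrow
   is simply the unsigned compare cc_src < cc_src2, e.g. 5 - 7 sets the
   borrow because 5 < 7 unsigned. */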
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}

static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}

static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
/*
   FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}

static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}

static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->is_br = 1;
}

static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);
    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}
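/* Illustrative note (added for clarity, not in the original source):
   the table holds the negation of each register-branch condition, so
   e.g. BRZ (cond 1) maps to TCG_COND_NE, which tcg_invert_cond() turns
   back into TCG_COND_EQ: the branch is taken when r_src == 0.  Entries
   0 and 4 are reserved encodings. */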
static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
#endif
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
/* asi moves */
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    TCGMemOp memop;
} DisasASI;
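/* Illustrative note (added for clarity, not in the original source):
   get_asi() below classifies an ASI into one of the ASIType cases, e.g.
   a v9 "lda [%g1] ASI_P, %g2" in user code would plausibly yield
   { GET_ASI_DIRECT, ASI_P, dc->mem_idx, MO_TEUL }, letting the load be
   emitted inline instead of going through the slow ld_asi helper. */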
static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:   /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA: /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        }
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
            mem_idx = MMU_NUCLEUS_IDX;
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }
}

static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for stda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);

            /* A write to a TLB register may alter page maps.  End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
                         TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(MO_UL);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);

            s64 = tcg_temp_new_i64();
            tcg_gen_extu_tl_i64(s64, src);
            gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
            tcg_temp_free_i64(s64);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);

            tcg_gen_trunc_i64_tl(dst, t64);
            tcg_temp_free_i64(t64);
        }
        break;
    }
}

static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv val2,
                        int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);
    TCGv val1, dst;
    TCGv_i32 r_asi;

    if (da.type == GET_ASI_EXCP) {
        return;
    }

    save_state(dc);
    val1 = gen_load_gpr(dc, rd);
    dst = gen_dest_gpr(dc, rd);
    r_asi = tcg_const_i32(da.asi);
    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_UB);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);

            s64 = tcg_const_i64(0xff);
            gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
            tcg_temp_free_i64(s64);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);

            tcg_gen_trunc_i64_tl(dst, t64);
            tcg_temp_free_i64(t64);
        }
        break;
    }
}
#endif
2353 #ifdef TARGET_SPARC64
2354 static void gen_ldf_asi(DisasContext *dc, TCGv addr,
2355 int insn, int size, int rd)
2357 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2358 TCGv_i32 d32;
2360 switch (da.type) {
2361 case GET_ASI_EXCP:
2362 break;
2364 case GET_ASI_DIRECT:
2365 gen_address_mask(dc, addr);
2366 switch (size) {
2367 case 4:
2368 d32 = gen_dest_fpr_F(dc);
2369 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
2370 gen_store_fpr_F(dc, rd, d32);
2371 break;
2372 case 8:
2373 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2374 break;
2375 case 16:
2376 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2377 tcg_gen_addi_tl(addr, addr, 8);
2378 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2379 break;
2380 default:
2381 g_assert_not_reached();
2383 break;
2385 case GET_ASI_BLOCK:
2386 /* Valid for lddfa on aligned registers only. */
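/* A block load moves 64 bytes from memory into eight consecutive
   double-precision FP registers, hence the 64-byte (0x3f) alignment
   check and the eight 8-byte loads below. */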
2387 if (size == 8 && (rd & 7) == 0) {
2388 TCGv eight;
2389 int i;
2391 gen_check_align(addr, 0x3f);
2392 gen_address_mask(dc, addr);
2394 eight = tcg_const_tl(8);
2395 for (i = 0; ; ++i) {
2396 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
2397 da.mem_idx, da.memop);
2398 if (i == 7) {
2399 break;
2401 tcg_gen_add_tl(addr, addr, eight);
2403 tcg_temp_free(eight);
2404 } else {
2405 gen_exception(dc, TT_ILL_INSN);
2407 break;
2409 case GET_ASI_SHORT:
2410 /* Valid for lddfa only. */
2411 if (size == 8) {
2412 gen_address_mask(dc, addr);
2413 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2414 } else {
2415 gen_exception(dc, TT_ILL_INSN);
2417 break;
2419 default:
2421 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2422 TCGv_i32 r_mop = tcg_const_i32(da.memop);
2424 save_state(dc);
2425 /* According to the table in the UA2011 manual, the only
2426 other asis that are valid for ldfa/lddfa/ldqfa are
2427 the NO_FAULT asis. We still need a helper for these,
2428 but we can just use the integer asi helper for them. */
2429 switch (size) {
2430 case 4:
2432 TCGv d64 = tcg_temp_new_i64();
2433 gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
2434 d32 = gen_dest_fpr_F(dc);
2435 tcg_gen_extrl_i64_i32(d32, d64);
2436 tcg_temp_free_i64(d64);
2437 gen_store_fpr_F(dc, rd, d32);
2439 break;
2440 case 8:
2441 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2442 break;
2443 case 16:
2444 gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
2445 tcg_gen_addi_tl(addr, addr, 8);
2446 gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
2447 break;
2448 default:
2449 g_assert_not_reached();
2451 tcg_temp_free_i32(r_mop);
2452 tcg_temp_free_i32(r_asi);
2454 break;
2458 static void gen_stf_asi(DisasContext *dc, TCGv addr,
2459 int insn, int size, int rd)
2461 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
2462 TCGv_i32 d32;
2464 switch (da.type) {
2465 case GET_ASI_EXCP:
2466 break;
2468 case GET_ASI_DIRECT:
2469 gen_address_mask(dc, addr);
2470 switch (size) {
2471 case 4:
2472 d32 = gen_load_fpr_F(dc, rd);
2473 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
2474 break;
2475 case 8:
2476 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2477 break;
2478 case 16:
2479 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2480 tcg_gen_addi_tl(addr, addr, 8);
2481 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
2482 break;
2483 default:
2484 g_assert_not_reached();
2486 break;
2488 case GET_ASI_BLOCK:
2489 /* Valid for stdfa on aligned registers only. */
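/* As with the block load above: 64 bytes, eight consecutive doubles,
   64-byte aligned. */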
2490 if (size == 8 && (rd & 7) == 0) {
2491 TCGv eight;
2492 int i;
2494 gen_check_align(addr, 0x3f);
2495 gen_address_mask(dc, addr);
2497 eight = tcg_const_tl(8);
2498 for (i = 0; ; ++i) {
2499 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
2500 da.mem_idx, da.memop);
2501 if (i == 7) {
2502 break;
2504 tcg_gen_add_tl(addr, addr, eight);
2506 tcg_temp_free(eight);
2507 } else {
2508 gen_exception(dc, TT_ILL_INSN);
2510 break;
2512 case GET_ASI_SHORT:
2513 /* Valid for stdfa only. */
2514 if (size == 8) {
2515 gen_address_mask(dc, addr);
2516 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
2517 } else {
2518 gen_exception(dc, TT_ILL_INSN);
2520 break;
2522 default:
2523 /* According to the table in the UA2011 manual, the only
2524 other asis that are valid for stfa/stdfa/stqfa are
2525 the PST* asis, which aren't currently handled. */
2526 gen_exception(dc, TT_ILL_INSN);
2527 break;
2531 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2533 DisasASI da = get_asi(dc, insn, MO_TEQ);
2534 TCGv_i64 hi = gen_dest_gpr(dc, rd);
2535 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
2537 switch (da.type) {
2538 case GET_ASI_EXCP:
2539 return;
2541 case GET_ASI_DTWINX:
2542 gen_check_align(addr, 15);
2543 gen_address_mask(dc, addr);
2544 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop);
2545 tcg_gen_addi_tl(addr, addr, 8);
2546 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
2547 break;
2549 case GET_ASI_DIRECT:
2551 TCGv_i64 tmp = tcg_temp_new_i64();
2553 gen_address_mask(dc, addr);
2554 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
2556 /* Note that LE ldda acts as if each 32-bit register
2557 result is byte swapped. Having just performed one
2558 64-bit bswap, we now need to swap the writebacks. */
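/* E.g. a little-endian load of bytes b0..b7 yields
   tmp = b7b6b5b4:b3b2b1b0; the low half b3b2b1b0 is exactly the
   byte-swapped word destined for rd, so lo/hi are extracted in the
   opposite order from the big-endian case. */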
2559 if ((da.memop & MO_BSWAP) == MO_TE) {
2560 tcg_gen_extr32_i64(lo, hi, tmp);
2561 } else {
2562 tcg_gen_extr32_i64(hi, lo, tmp);
2564 tcg_temp_free_i64(tmp);
2566 break;
2568 default:
2570 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2572 save_state(dc);
2573 gen_helper_ldda_asi(cpu_env, addr, r_asi);
2574 tcg_temp_free_i32(r_asi);
2576 tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUSPARCState, qt0.high));
2577 tcg_gen_ld_i64(lo, cpu_env, offsetof(CPUSPARCState, qt0.low));
2579 break;
2582 gen_store_gpr(dc, rd, hi);
2583 gen_store_gpr(dc, rd + 1, lo);
2586 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2587 int insn, int rd)
2589 DisasASI da = get_asi(dc, insn, MO_TEQ);
2590 TCGv lo = gen_load_gpr(dc, rd + 1);
2592 switch (da.type) {
2593 case GET_ASI_EXCP:
2594 break;
2596 case GET_ASI_DTWINX:
2597 gen_check_align(addr, 15);
2598 gen_address_mask(dc, addr);
2599 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop);
2600 tcg_gen_addi_tl(addr, addr, 8);
2601 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
2602 break;
2604 case GET_ASI_DIRECT:
2606 TCGv_i64 t64 = tcg_temp_new_i64();
2608 /* Note that LE stda acts as if each 32-bit register result is
2609 byte swapped. We will perform one 64-bit LE store, so now
2610 we must swap the order of the construction. */
2611 if ((da.memop & MO_BSWAP) == MO_TE) {
2612 tcg_gen_concat32_i64(t64, lo, hi);
2613 } else {
2614 tcg_gen_concat32_i64(t64, hi, lo);
2616 gen_address_mask(dc, addr);
2617 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2618 tcg_temp_free_i64(t64);
2620 break;
2622 default:
2624 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2625 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2626 TCGv_i64 t64;
2628 save_state(dc);
2630 t64 = tcg_temp_new_i64();
2631 tcg_gen_concat_tl_i64(t64, lo, hi);
2632 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2633 tcg_temp_free_i32(r_mop);
2634 tcg_temp_free_i32(r_asi);
2635 tcg_temp_free_i64(t64);
2637 break;
2641 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv val2,
2642 int insn, int rd)
2644 DisasASI da = get_asi(dc, insn, MO_TEQ);
2645 TCGv val1 = gen_load_gpr(dc, rd);
2646 TCGv dst = gen_dest_gpr(dc, rd);
2647 TCGv_i32 r_asi;
2649 if (da.type == GET_ASI_EXCP) {
2650 return;
2653 save_state(dc);
2654 r_asi = tcg_const_i32(da.asi);
2655 gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
2656 tcg_temp_free_i32(r_asi);
2657 gen_store_gpr(dc, rd, dst);
2660 #elif !defined(CONFIG_USER_ONLY)
2661 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
2663 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2664 whereby "rd + 1" elicits "error: array subscript is above array".
2665 Since we have already asserted that rd is even, the semantics
2666 are unchanged. */
2667 TCGv lo = gen_dest_gpr(dc, rd | 1);
2668 TCGv hi = gen_dest_gpr(dc, rd);
2669 TCGv_i64 t64 = tcg_temp_new_i64();
2670 DisasASI da = get_asi(dc, insn, MO_TEQ);
2672 switch (da.type) {
2673 case GET_ASI_EXCP:
2674 tcg_temp_free_i64(t64);
2675 return;
2676 case GET_ASI_DIRECT:
2677 gen_address_mask(dc, addr);
2678 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
2679 break;
2680 default:
2682 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2683 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2685 save_state(dc);
2686 gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
2687 tcg_temp_free_i32(r_mop);
2688 tcg_temp_free_i32(r_asi);
2690 break;
2693 tcg_gen_extr_i64_i32(lo, hi, t64);
2694 tcg_temp_free_i64(t64);
2695 gen_store_gpr(dc, rd | 1, lo);
2696 gen_store_gpr(dc, rd, hi);
2699 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
2700 int insn, int rd)
2702 DisasASI da = get_asi(dc, insn, MO_TEQ);
2703 TCGv lo = gen_load_gpr(dc, rd + 1);
2704 TCGv_i64 t64 = tcg_temp_new_i64();
2706 tcg_gen_concat_tl_i64(t64, lo, hi);
2708 switch (da.type) {
2709 case GET_ASI_EXCP:
2710 break;
2711 case GET_ASI_DIRECT:
2712 gen_address_mask(dc, addr);
2713 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
2714 break;
2715 default:
2717 TCGv_i32 r_asi = tcg_const_i32(da.asi);
2718 TCGv_i32 r_mop = tcg_const_i32(MO_Q);
2720 save_state(dc);
2721 gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
2722 tcg_temp_free_i32(r_mop);
2723 tcg_temp_free_i32(r_asi);
2725 break;
2728 tcg_temp_free_i64(t64);
2730 #endif
2732 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2734 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2735 return gen_load_gpr(dc, rs1);
2738 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2740 if (IS_IMM) { /* immediate */
2741 target_long simm = GET_FIELDs(insn, 19, 31);
2742 TCGv t = get_temp_tl(dc);
2743 tcg_gen_movi_tl(t, simm);
2744 return t;
2745 } else { /* register */
2746 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2747 return gen_load_gpr(dc, rs2);
2751 #ifdef TARGET_SPARC64
2752 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2754 TCGv_i32 c32, zero, dst, s1, s2;
2756 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2757 or fold the comparison down to 32 bits and use movcond_i32. Choose
2758 the latter. */
2759 c32 = tcg_temp_new_i32();
2760 if (cmp->is_bool) {
2761 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2762 } else {
2763 TCGv_i64 c64 = tcg_temp_new_i64();
2764 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2765 tcg_gen_extrl_i64_i32(c32, c64);
2766 tcg_temp_free_i64(c64);
2769 s1 = gen_load_fpr_F(dc, rs);
2770 s2 = gen_load_fpr_F(dc, rd);
2771 dst = gen_dest_fpr_F(dc);
2772 zero = tcg_const_i32(0);
2774 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2776 tcg_temp_free_i32(c32);
2777 tcg_temp_free_i32(zero);
2778 gen_store_fpr_F(dc, rd, dst);
2781 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2783 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2784 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2785 gen_load_fpr_D(dc, rs),
2786 gen_load_fpr_D(dc, rd));
2787 gen_store_fpr_D(dc, rd, dst);
2790 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2792 int qd = QFPREG(rd);
2793 int qs = QFPREG(rs);
2795 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2796 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2797 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2798 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2800 gen_update_fprs_dirty(dc, qd);
2803 #ifndef CONFIG_USER_ONLY
2804 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2806 TCGv_i32 r_tl = tcg_temp_new_i32();
2808 /* load env->tl into r_tl */
2809 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2811 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2812 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2814 /* calculate offset to current trap state from env->ts, reuse r_tl */
2815 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2816 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2818 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2820 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2821 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2822 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2823 tcg_temp_free_ptr(r_tl_tmp);
2826 tcg_temp_free_i32(r_tl);
2828 #endif
2830 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2831 int width, bool cc, bool left)
2833 TCGv lo1, lo2, t1, t2;
2834 uint64_t amask, tabl, tabr;
2835 int shift, imask, omask;
2837 if (cc) {
2838 tcg_gen_mov_tl(cpu_cc_src, s1);
2839 tcg_gen_mov_tl(cpu_cc_src2, s2);
2840 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2841 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2842 dc->cc_op = CC_OP_SUB;
2845 /* Theory of operation: there are two tables, left and right (not to
2846 be confused with the left and right versions of the opcode). These
2847 are indexed by the low 3 bits of the inputs. To make things "easy",
2848 these tables are loaded into two constants, TABL and TABR below.
2849 The operation index = (input & imask) << shift calculates the index
2850 into the constant, while val = (table >> index) & omask calculates
2851 the value we're looking for. */
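/* For instance, width 8 / left: an input with (s1 & 7) == 2 gives
   index = 2 << 3 = 16, and (0x80c0e0f0f8fcfeffULL >> 16) & 0xff == 0xfc,
   the edge mask selected for a left offset of 2. */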
2852 switch (width) {
2853 case 8:
2854 imask = 0x7;
2855 shift = 3;
2856 omask = 0xff;
2857 if (left) {
2858 tabl = 0x80c0e0f0f8fcfeffULL;
2859 tabr = 0xff7f3f1f0f070301ULL;
2860 } else {
2861 tabl = 0x0103070f1f3f7fffULL;
2862 tabr = 0xfffefcf8f0e0c080ULL;
2864 break;
2865 case 16:
2866 imask = 0x6;
2867 shift = 1;
2868 omask = 0xf;
2869 if (left) {
2870 tabl = 0x8cef;
2871 tabr = 0xf731;
2872 } else {
2873 tabl = 0x137f;
2874 tabr = 0xfec8;
2876 break;
2877 case 32:
2878 imask = 0x4;
2879 shift = 0;
2880 omask = 0x3;
2881 if (left) {
2882 tabl = (2 << 2) | 3;
2883 tabr = (3 << 2) | 1;
2884 } else {
2885 tabl = (1 << 2) | 3;
2886 tabr = (3 << 2) | 2;
2888 break;
2889 default:
2890 abort();
2893 lo1 = tcg_temp_new();
2894 lo2 = tcg_temp_new();
2895 tcg_gen_andi_tl(lo1, s1, imask);
2896 tcg_gen_andi_tl(lo2, s2, imask);
2897 tcg_gen_shli_tl(lo1, lo1, shift);
2898 tcg_gen_shli_tl(lo2, lo2, shift);
2900 t1 = tcg_const_tl(tabl);
2901 t2 = tcg_const_tl(tabr);
2902 tcg_gen_shr_tl(lo1, t1, lo1);
2903 tcg_gen_shr_tl(lo2, t2, lo2);
2904 tcg_gen_andi_tl(dst, lo1, omask);
2905 tcg_gen_andi_tl(lo2, lo2, omask);
2907 amask = -8;
2908 if (AM_CHECK(dc)) {
2909 amask &= 0xffffffffULL;
2911 tcg_gen_andi_tl(s1, s1, amask);
2912 tcg_gen_andi_tl(s2, s2, amask);
2914 /* We want to compute
2915 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2916 We've already done dst = lo1, so this reduces to
2917 dst &= (s1 == s2 ? -1 : lo2)
2918 which we perform by
2919 lo2 |= -(s1 == s2)
2920 dst &= lo2 */
2922 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2923 tcg_gen_neg_tl(t1, t1);
2924 tcg_gen_or_tl(lo2, lo2, t1);
2925 tcg_gen_and_tl(dst, dst, lo2);
2927 tcg_temp_free(lo1);
2928 tcg_temp_free(lo2);
2929 tcg_temp_free(t1);
2930 tcg_temp_free(t2);
2933 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2935 TCGv tmp = tcg_temp_new();
2937 tcg_gen_add_tl(tmp, s1, s2);
2938 tcg_gen_andi_tl(dst, tmp, -8);
2939 if (left) {
2940 tcg_gen_neg_tl(tmp, tmp);
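/* (-x) & 7 == (8 - (x & 7)) & 7: for the _LITTLE variant the two's
   complement of the low three address bits is what ends up in
   GSR.align below. */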
2942 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2944 tcg_temp_free(tmp);
2947 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2949 TCGv t1, t2, shift;
2951 t1 = tcg_temp_new();
2952 t2 = tcg_temp_new();
2953 shift = tcg_temp_new();
2955 tcg_gen_andi_tl(shift, gsr, 7);
2956 tcg_gen_shli_tl(shift, shift, 3);
2957 tcg_gen_shl_tl(t1, s1, shift);
2959 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2960 shift of (up to 63) followed by a constant shift of 1. */
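/* Concretely, t2 = (s2 >> (shift ^ 63)) >> 1 == s2 >> (64 - shift),
   and for shift == 0 this correctly yields 0 rather than the
   undefined s2 >> 64. */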
2961 tcg_gen_xori_tl(shift, shift, 63);
2962 tcg_gen_shr_tl(t2, s2, shift);
2963 tcg_gen_shri_tl(t2, t2, 1);
2965 tcg_gen_or_tl(dst, t1, t2);
2967 tcg_temp_free(t1);
2968 tcg_temp_free(t2);
2969 tcg_temp_free(shift);
2971 #endif
2973 #define CHECK_IU_FEATURE(dc, FEATURE) \
2974 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2975 goto illegal_insn;
2976 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2977 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2978 goto nfpu_insn;
2980 /* before an instruction, dc->pc must be static */
2981 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2983 unsigned int opc, rs1, rs2, rd;
2984 TCGv cpu_src1, cpu_src2;
2985 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2986 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2987 target_long simm;
2989 opc = GET_FIELD(insn, 0, 1);
2990 rd = GET_FIELD(insn, 2, 6);
2992 switch (opc) {
2993 case 0: /* branches/sethi */
2995 unsigned int xop = GET_FIELD(insn, 7, 9);
2996 int32_t target;
2997 switch (xop) {
2998 #ifdef TARGET_SPARC64
2999 case 0x1: /* V9 BPcc */
3001 int cc;
3003 target = GET_FIELD_SP(insn, 0, 18);
3004 target = sign_extend(target, 19);
3005 target <<= 2;
3006 cc = GET_FIELD_SP(insn, 20, 21);
3007 if (cc == 0)
3008 do_branch(dc, target, insn, 0);
3009 else if (cc == 2)
3010 do_branch(dc, target, insn, 1);
3011 else
3012 goto illegal_insn;
3013 goto jmp_insn;
3015 case 0x3: /* V9 BPr */
3017 target = GET_FIELD_SP(insn, 0, 13) |
3018 (GET_FIELD_SP(insn, 20, 21) << 14);
3019 target = sign_extend(target, 16);
3020 target <<= 2;
3021 cpu_src1 = get_src1(dc, insn);
3022 do_branch_reg(dc, target, insn, cpu_src1);
3023 goto jmp_insn;
3025 case 0x5: /* V9 FBPcc */
3027 int cc = GET_FIELD_SP(insn, 20, 21);
3028 if (gen_trap_ifnofpu(dc)) {
3029 goto jmp_insn;
3031 target = GET_FIELD_SP(insn, 0, 18);
3032 target = sign_extend(target, 19);
3033 target <<= 2;
3034 do_fbranch(dc, target, insn, cc);
3035 goto jmp_insn;
3037 #else
3038 case 0x7: /* CBN+x */
3040 goto ncp_insn;
3042 #endif
3043 case 0x2: /* BN+x */
3045 target = GET_FIELD(insn, 10, 31);
3046 target = sign_extend(target, 22);
3047 target <<= 2;
3048 do_branch(dc, target, insn, 0);
3049 goto jmp_insn;
3051 case 0x6: /* FBN+x */
3053 if (gen_trap_ifnofpu(dc)) {
3054 goto jmp_insn;
3056 target = GET_FIELD(insn, 10, 31);
3057 target = sign_extend(target, 22);
3058 target <<= 2;
3059 do_fbranch(dc, target, insn, 0);
3060 goto jmp_insn;
3062 case 0x4: /* SETHI */
3063 /* Special-case %g0 because that's the canonical nop. */
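/* (sethi 0, %g0 encodes as 0x01000000, the architected nop.) */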
3064 if (rd) {
3065 uint32_t value = GET_FIELD(insn, 10, 31);
3066 TCGv t = gen_dest_gpr(dc, rd);
3067 tcg_gen_movi_tl(t, value << 10);
3068 gen_store_gpr(dc, rd, t);
3070 break;
3071 case 0x0: /* UNIMPL */
3072 default:
3073 goto illegal_insn;
3075 break;
3077 break;
3078 case 1: /*CALL*/
3080 target_long target = GET_FIELDs(insn, 2, 31) << 2;
3081 TCGv o7 = gen_dest_gpr(dc, 15);
3083 tcg_gen_movi_tl(o7, dc->pc);
3084 gen_store_gpr(dc, 15, o7);
3085 target += dc->pc;
3086 gen_mov_pc_npc(dc);
3087 #ifdef TARGET_SPARC64
3088 if (unlikely(AM_CHECK(dc))) {
3089 target &= 0xffffffffULL;
3091 #endif
3092 dc->npc = target;
3094 goto jmp_insn;
3095 case 2: /* FPU & Logical Operations */
3097 unsigned int xop = GET_FIELD(insn, 7, 12);
3098 TCGv cpu_dst = get_temp_tl(dc);
3099 TCGv cpu_tmp0;
3101 if (xop == 0x3a) { /* generate trap */
3102 int cond = GET_FIELD(insn, 3, 6);
3103 TCGv_i32 trap;
3104 TCGLabel *l1 = NULL;
3105 int mask;
3107 if (cond == 0) {
3108 /* Trap never. */
3109 break;
3112 save_state(dc);
3114 if (cond != 8) {
3115 /* Conditional trap. */
3116 DisasCompare cmp;
3117 #ifdef TARGET_SPARC64
3118 /* V9 icc/xcc */
3119 int cc = GET_FIELD_SP(insn, 11, 12);
3120 if (cc == 0) {
3121 gen_compare(&cmp, 0, cond, dc);
3122 } else if (cc == 2) {
3123 gen_compare(&cmp, 1, cond, dc);
3124 } else {
3125 goto illegal_insn;
3127 #else
3128 gen_compare(&cmp, 0, cond, dc);
3129 #endif
3130 l1 = gen_new_label();
3131 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3132 cmp.c1, cmp.c2, l1);
3133 free_compare(&cmp);
3136 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3137 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
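/* i.e. software trap numbers are 7 bits on V8, 8 bits for UA2005
   hyper-traps. */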
3139 /* Don't use the normal temporaries, as they may well have
3140 gone out of scope with the branch above. While we're
3141 doing that we might as well pre-truncate to 32-bit. */
3142 trap = tcg_temp_new_i32();
3144 rs1 = GET_FIELD_SP(insn, 14, 18);
3145 if (IS_IMM) {
3146 rs2 = GET_FIELD_SP(insn, 0, 6);
3147 if (rs1 == 0) {
3148 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3149 /* Signal that the trap value is fully constant. */
3150 mask = 0;
3151 } else {
3152 TCGv t1 = gen_load_gpr(dc, rs1);
3153 tcg_gen_trunc_tl_i32(trap, t1);
3154 tcg_gen_addi_i32(trap, trap, rs2);
3156 } else {
3157 TCGv t1, t2;
3158 rs2 = GET_FIELD_SP(insn, 0, 4);
3159 t1 = gen_load_gpr(dc, rs1);
3160 t2 = gen_load_gpr(dc, rs2);
3161 tcg_gen_add_tl(t1, t1, t2);
3162 tcg_gen_trunc_tl_i32(trap, t1);
3164 if (mask != 0) {
3165 tcg_gen_andi_i32(trap, trap, mask);
3166 tcg_gen_addi_i32(trap, trap, TT_TRAP);
3169 gen_helper_raise_exception(cpu_env, trap);
3170 tcg_temp_free_i32(trap);
3172 if (cond == 8) {
3173 /* An unconditional trap ends the TB. */
3174 dc->is_br = 1;
3175 goto jmp_insn;
3176 } else {
3177 /* A conditional trap falls through to the next insn. */
3178 gen_set_label(l1);
3179 break;
3181 } else if (xop == 0x28) {
3182 rs1 = GET_FIELD(insn, 13, 17);
3183 switch(rs1) {
3184 case 0: /* rdy */
3185 #ifndef TARGET_SPARC64
3186 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3187 manual, rdy on the microSPARC
3188 II */
3189 case 0x0f: /* stbar in the SPARCv8 manual,
3190 rdy on the microSPARC II */
3191 case 0x10 ... 0x1f: /* implementation-dependent in the
3192 SPARCv8 manual, rdy on the
3193 microSPARC II */
3194 /* Read Asr17 */
3195 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3196 TCGv t = gen_dest_gpr(dc, rd);
3197 /* Read Asr17 for a Leon3 monoprocessor */
3198 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3199 gen_store_gpr(dc, rd, t);
3200 break;
3202 #endif
3203 gen_store_gpr(dc, rd, cpu_y);
3204 break;
3205 #ifdef TARGET_SPARC64
3206 case 0x2: /* V9 rdccr */
3207 update_psr(dc);
3208 gen_helper_rdccr(cpu_dst, cpu_env);
3209 gen_store_gpr(dc, rd, cpu_dst);
3210 break;
3211 case 0x3: /* V9 rdasi */
3212 tcg_gen_movi_tl(cpu_dst, dc->asi);
3213 gen_store_gpr(dc, rd, cpu_dst);
3214 break;
3215 case 0x4: /* V9 rdtick */
3217 TCGv_ptr r_tickptr;
3218 TCGv_i32 r_const;
3220 r_tickptr = tcg_temp_new_ptr();
3221 r_const = tcg_const_i32(dc->mem_idx);
3222 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3223 offsetof(CPUSPARCState, tick));
3224 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3225 r_const);
3226 tcg_temp_free_ptr(r_tickptr);
3227 tcg_temp_free_i32(r_const);
3228 gen_store_gpr(dc, rd, cpu_dst);
3230 break;
3231 case 0x5: /* V9 rdpc */
3233 TCGv t = gen_dest_gpr(dc, rd);
3234 if (unlikely(AM_CHECK(dc))) {
3235 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3236 } else {
3237 tcg_gen_movi_tl(t, dc->pc);
3239 gen_store_gpr(dc, rd, t);
3241 break;
3242 case 0x6: /* V9 rdfprs */
3243 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3244 gen_store_gpr(dc, rd, cpu_dst);
3245 break;
3246 case 0xf: /* V9 membar */
3247 break; /* no effect */
3248 case 0x13: /* Graphics Status */
3249 if (gen_trap_ifnofpu(dc)) {
3250 goto jmp_insn;
3252 gen_store_gpr(dc, rd, cpu_gsr);
3253 break;
3254 case 0x16: /* Softint */
3255 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3256 offsetof(CPUSPARCState, softint));
3257 gen_store_gpr(dc, rd, cpu_dst);
3258 break;
3259 case 0x17: /* Tick compare */
3260 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3261 break;
3262 case 0x18: /* System tick */
3264 TCGv_ptr r_tickptr;
3265 TCGv_i32 r_const;
3267 r_tickptr = tcg_temp_new_ptr();
3268 r_const = tcg_const_i32(dc->mem_idx);
3269 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3270 offsetof(CPUSPARCState, stick));
3271 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3272 r_const);
3273 tcg_temp_free_ptr(r_tickptr);
3274 tcg_temp_free_i32(r_const);
3275 gen_store_gpr(dc, rd, cpu_dst);
3277 break;
3278 case 0x19: /* System tick compare */
3279 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3280 break;
3281 case 0x10: /* Performance Control */
3282 case 0x11: /* Performance Instrumentation Counter */
3283 case 0x12: /* Dispatch Control */
3284 case 0x14: /* Softint set, WO */
3285 case 0x15: /* Softint clear, WO */
3286 #endif
3287 default:
3288 goto illegal_insn;
3290 #if !defined(CONFIG_USER_ONLY)
3291 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3292 #ifndef TARGET_SPARC64
3293 if (!supervisor(dc)) {
3294 goto priv_insn;
3296 update_psr(dc);
3297 gen_helper_rdpsr(cpu_dst, cpu_env);
3298 #else
3299 CHECK_IU_FEATURE(dc, HYPV);
3300 if (!hypervisor(dc))
3301 goto priv_insn;
3302 rs1 = GET_FIELD(insn, 13, 17);
3303 switch (rs1) {
3304 case 0: // hpstate
3305 // gen_op_rdhpstate();
3306 break;
3307 case 1: // htstate
3308 // gen_op_rdhtstate();
3309 break;
3310 case 3: // hintp
3311 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3312 break;
3313 case 5: // htba
3314 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3315 break;
3316 case 6: // hver
3317 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3318 break;
3319 case 31: // hstick_cmpr
3320 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3321 break;
3322 default:
3323 goto illegal_insn;
3325 #endif
3326 gen_store_gpr(dc, rd, cpu_dst);
3327 break;
3328 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3329 if (!supervisor(dc)) {
3330 goto priv_insn;
3332 cpu_tmp0 = get_temp_tl(dc);
3333 #ifdef TARGET_SPARC64
3334 rs1 = GET_FIELD(insn, 13, 17);
3335 switch (rs1) {
3336 case 0: // tpc
3338 TCGv_ptr r_tsptr;
3340 r_tsptr = tcg_temp_new_ptr();
3341 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3342 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3343 offsetof(trap_state, tpc));
3344 tcg_temp_free_ptr(r_tsptr);
3346 break;
3347 case 1: // tnpc
3349 TCGv_ptr r_tsptr;
3351 r_tsptr = tcg_temp_new_ptr();
3352 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3353 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3354 offsetof(trap_state, tnpc));
3355 tcg_temp_free_ptr(r_tsptr);
3357 break;
3358 case 2: // tstate
3360 TCGv_ptr r_tsptr;
3362 r_tsptr = tcg_temp_new_ptr();
3363 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3364 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3365 offsetof(trap_state, tstate));
3366 tcg_temp_free_ptr(r_tsptr);
3368 break;
3369 case 3: // tt
3371 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3373 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3374 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3375 offsetof(trap_state, tt));
3376 tcg_temp_free_ptr(r_tsptr);
3378 break;
3379 case 4: // tick
3381 TCGv_ptr r_tickptr;
3382 TCGv_i32 r_const;
3384 r_tickptr = tcg_temp_new_ptr();
3385 r_const = tcg_const_i32(dc->mem_idx);
3386 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3387 offsetof(CPUSPARCState, tick));
3388 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3389 r_tickptr, r_const);
3390 tcg_temp_free_ptr(r_tickptr);
3391 tcg_temp_free_i32(r_const);
3393 break;
3394 case 5: // tba
3395 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3396 break;
3397 case 6: // pstate
3398 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3399 offsetof(CPUSPARCState, pstate));
3400 break;
3401 case 7: // tl
3402 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3403 offsetof(CPUSPARCState, tl));
3404 break;
3405 case 8: // pil
3406 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3407 offsetof(CPUSPARCState, psrpil));
3408 break;
3409 case 9: // cwp
3410 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3411 break;
3412 case 10: // cansave
3413 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3414 offsetof(CPUSPARCState, cansave));
3415 break;
3416 case 11: // canrestore
3417 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3418 offsetof(CPUSPARCState, canrestore));
3419 break;
3420 case 12: // cleanwin
3421 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3422 offsetof(CPUSPARCState, cleanwin));
3423 break;
3424 case 13: // otherwin
3425 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3426 offsetof(CPUSPARCState, otherwin));
3427 break;
3428 case 14: // wstate
3429 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3430 offsetof(CPUSPARCState, wstate));
3431 break;
3432 case 16: // UA2005 gl
3433 CHECK_IU_FEATURE(dc, GL);
3434 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3435 offsetof(CPUSPARCState, gl));
3436 break;
3437 case 26: // UA2005 strand status
3438 CHECK_IU_FEATURE(dc, HYPV);
3439 if (!hypervisor(dc))
3440 goto priv_insn;
3441 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3442 break;
3443 case 31: // ver
3444 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3445 break;
3446 case 15: // fq
3447 default:
3448 goto illegal_insn;
3450 #else
3451 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3452 #endif
3453 gen_store_gpr(dc, rd, cpu_tmp0);
3454 break;
3455 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3456 #ifdef TARGET_SPARC64
3457 save_state(dc);
3458 gen_helper_flushw(cpu_env);
3459 #else
3460 if (!supervisor(dc))
3461 goto priv_insn;
3462 gen_store_gpr(dc, rd, cpu_tbr);
3463 #endif
3464 break;
3465 #endif
3466 } else if (xop == 0x34) { /* FPU Operations */
3467 if (gen_trap_ifnofpu(dc)) {
3468 goto jmp_insn;
3470 gen_op_clear_ieee_excp_and_FTT();
3471 rs1 = GET_FIELD(insn, 13, 17);
3472 rs2 = GET_FIELD(insn, 27, 31);
3473 xop = GET_FIELD(insn, 18, 26);
3475 switch (xop) {
3476 case 0x1: /* fmovs */
3477 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3478 gen_store_fpr_F(dc, rd, cpu_src1_32);
3479 break;
3480 case 0x5: /* fnegs */
3481 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3482 break;
3483 case 0x9: /* fabss */
3484 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3485 break;
3486 case 0x29: /* fsqrts */
3487 CHECK_FPU_FEATURE(dc, FSQRT);
3488 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3489 break;
3490 case 0x2a: /* fsqrtd */
3491 CHECK_FPU_FEATURE(dc, FSQRT);
3492 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3493 break;
3494 case 0x2b: /* fsqrtq */
3495 CHECK_FPU_FEATURE(dc, FLOAT128);
3496 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3497 break;
3498 case 0x41: /* fadds */
3499 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3500 break;
3501 case 0x42: /* faddd */
3502 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3503 break;
3504 case 0x43: /* faddq */
3505 CHECK_FPU_FEATURE(dc, FLOAT128);
3506 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3507 break;
3508 case 0x45: /* fsubs */
3509 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3510 break;
3511 case 0x46: /* fsubd */
3512 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3513 break;
3514 case 0x47: /* fsubq */
3515 CHECK_FPU_FEATURE(dc, FLOAT128);
3516 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3517 break;
3518 case 0x49: /* fmuls */
3519 CHECK_FPU_FEATURE(dc, FMUL);
3520 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3521 break;
3522 case 0x4a: /* fmuld */
3523 CHECK_FPU_FEATURE(dc, FMUL);
3524 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3525 break;
3526 case 0x4b: /* fmulq */
3527 CHECK_FPU_FEATURE(dc, FLOAT128);
3528 CHECK_FPU_FEATURE(dc, FMUL);
3529 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3530 break;
3531 case 0x4d: /* fdivs */
3532 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3533 break;
3534 case 0x4e: /* fdivd */
3535 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3536 break;
3537 case 0x4f: /* fdivq */
3538 CHECK_FPU_FEATURE(dc, FLOAT128);
3539 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3540 break;
3541 case 0x69: /* fsmuld */
3542 CHECK_FPU_FEATURE(dc, FSMULD);
3543 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3544 break;
3545 case 0x6e: /* fdmulq */
3546 CHECK_FPU_FEATURE(dc, FLOAT128);
3547 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3548 break;
3549 case 0xc4: /* fitos */
3550 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3551 break;
3552 case 0xc6: /* fdtos */
3553 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3554 break;
3555 case 0xc7: /* fqtos */
3556 CHECK_FPU_FEATURE(dc, FLOAT128);
3557 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3558 break;
3559 case 0xc8: /* fitod */
3560 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3561 break;
3562 case 0xc9: /* fstod */
3563 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3564 break;
3565 case 0xcb: /* fqtod */
3566 CHECK_FPU_FEATURE(dc, FLOAT128);
3567 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3568 break;
3569 case 0xcc: /* fitoq */
3570 CHECK_FPU_FEATURE(dc, FLOAT128);
3571 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3572 break;
3573 case 0xcd: /* fstoq */
3574 CHECK_FPU_FEATURE(dc, FLOAT128);
3575 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3576 break;
3577 case 0xce: /* fdtoq */
3578 CHECK_FPU_FEATURE(dc, FLOAT128);
3579 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3580 break;
3581 case 0xd1: /* fstoi */
3582 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3583 break;
3584 case 0xd2: /* fdtoi */
3585 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3586 break;
3587 case 0xd3: /* fqtoi */
3588 CHECK_FPU_FEATURE(dc, FLOAT128);
3589 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3590 break;
3591 #ifdef TARGET_SPARC64
3592 case 0x2: /* V9 fmovd */
3593 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3594 gen_store_fpr_D(dc, rd, cpu_src1_64);
3595 break;
3596 case 0x3: /* V9 fmovq */
3597 CHECK_FPU_FEATURE(dc, FLOAT128);
3598 gen_move_Q(dc, rd, rs2);
3599 break;
3600 case 0x6: /* V9 fnegd */
3601 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3602 break;
3603 case 0x7: /* V9 fnegq */
3604 CHECK_FPU_FEATURE(dc, FLOAT128);
3605 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3606 break;
3607 case 0xa: /* V9 fabsd */
3608 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3609 break;
3610 case 0xb: /* V9 fabsq */
3611 CHECK_FPU_FEATURE(dc, FLOAT128);
3612 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3613 break;
3614 case 0x81: /* V9 fstox */
3615 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3616 break;
3617 case 0x82: /* V9 fdtox */
3618 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3619 break;
3620 case 0x83: /* V9 fqtox */
3621 CHECK_FPU_FEATURE(dc, FLOAT128);
3622 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3623 break;
3624 case 0x84: /* V9 fxtos */
3625 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3626 break;
3627 case 0x88: /* V9 fxtod */
3628 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3629 break;
3630 case 0x8c: /* V9 fxtoq */
3631 CHECK_FPU_FEATURE(dc, FLOAT128);
3632 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3633 break;
3634 #endif
3635 default:
3636 goto illegal_insn;
3638 } else if (xop == 0x35) { /* FPU Operations */
3639 #ifdef TARGET_SPARC64
3640 int cond;
3641 #endif
3642 if (gen_trap_ifnofpu(dc)) {
3643 goto jmp_insn;
3645 gen_op_clear_ieee_excp_and_FTT();
3646 rs1 = GET_FIELD(insn, 13, 17);
3647 rs2 = GET_FIELD(insn, 27, 31);
3648 xop = GET_FIELD(insn, 18, 26);
3650 #ifdef TARGET_SPARC64
3651 #define FMOVR(sz) \
3652 do { \
3653 DisasCompare cmp; \
3654 cond = GET_FIELD_SP(insn, 10, 12); \
3655 cpu_src1 = get_src1(dc, insn); \
3656 gen_compare_reg(&cmp, cond, cpu_src1); \
3657 gen_fmov##sz(dc, &cmp, rd, rs2); \
3658 free_compare(&cmp); \
3659 } while (0)
3661 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3662 FMOVR(s);
3663 break;
3664 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3665 FMOVR(d);
3666 break;
3667 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3668 CHECK_FPU_FEATURE(dc, FLOAT128);
3669 FMOVR(q);
3670 break;
3672 #undef FMOVR
3673 #endif
3674 switch (xop) {
3675 #ifdef TARGET_SPARC64
3676 #define FMOVCC(fcc, sz) \
3677 do { \
3678 DisasCompare cmp; \
3679 cond = GET_FIELD_SP(insn, 14, 17); \
3680 gen_fcompare(&cmp, fcc, cond); \
3681 gen_fmov##sz(dc, &cmp, rd, rs2); \
3682 free_compare(&cmp); \
3683 } while (0)
3685 case 0x001: /* V9 fmovscc %fcc0 */
3686 FMOVCC(0, s);
3687 break;
3688 case 0x002: /* V9 fmovdcc %fcc0 */
3689 FMOVCC(0, d);
3690 break;
3691 case 0x003: /* V9 fmovqcc %fcc0 */
3692 CHECK_FPU_FEATURE(dc, FLOAT128);
3693 FMOVCC(0, q);
3694 break;
3695 case 0x041: /* V9 fmovscc %fcc1 */
3696 FMOVCC(1, s);
3697 break;
3698 case 0x042: /* V9 fmovdcc %fcc1 */
3699 FMOVCC(1, d);
3700 break;
3701 case 0x043: /* V9 fmovqcc %fcc1 */
3702 CHECK_FPU_FEATURE(dc, FLOAT128);
3703 FMOVCC(1, q);
3704 break;
3705 case 0x081: /* V9 fmovscc %fcc2 */
3706 FMOVCC(2, s);
3707 break;
3708 case 0x082: /* V9 fmovdcc %fcc2 */
3709 FMOVCC(2, d);
3710 break;
3711 case 0x083: /* V9 fmovqcc %fcc2 */
3712 CHECK_FPU_FEATURE(dc, FLOAT128);
3713 FMOVCC(2, q);
3714 break;
3715 case 0x0c1: /* V9 fmovscc %fcc3 */
3716 FMOVCC(3, s);
3717 break;
3718 case 0x0c2: /* V9 fmovdcc %fcc3 */
3719 FMOVCC(3, d);
3720 break;
3721 case 0x0c3: /* V9 fmovqcc %fcc3 */
3722 CHECK_FPU_FEATURE(dc, FLOAT128);
3723 FMOVCC(3, q);
3724 break;
3725 #undef FMOVCC
3726 #define FMOVCC(xcc, sz) \
3727 do { \
3728 DisasCompare cmp; \
3729 cond = GET_FIELD_SP(insn, 14, 17); \
3730 gen_compare(&cmp, xcc, cond, dc); \
3731 gen_fmov##sz(dc, &cmp, rd, rs2); \
3732 free_compare(&cmp); \
3733 } while (0)
3735 case 0x101: /* V9 fmovscc %icc */
3736 FMOVCC(0, s);
3737 break;
3738 case 0x102: /* V9 fmovdcc %icc */
3739 FMOVCC(0, d);
3740 break;
3741 case 0x103: /* V9 fmovqcc %icc */
3742 CHECK_FPU_FEATURE(dc, FLOAT128);
3743 FMOVCC(0, q);
3744 break;
3745 case 0x181: /* V9 fmovscc %xcc */
3746 FMOVCC(1, s);
3747 break;
3748 case 0x182: /* V9 fmovdcc %xcc */
3749 FMOVCC(1, d);
3750 break;
3751 case 0x183: /* V9 fmovqcc %xcc */
3752 CHECK_FPU_FEATURE(dc, FLOAT128);
3753 FMOVCC(1, q);
3754 break;
3755 #undef FMOVCC
3756 #endif
3757 case 0x51: /* fcmps, V9 %fcc */
3758 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3759 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3760 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3761 break;
3762 case 0x52: /* fcmpd, V9 %fcc */
3763 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3764 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3765 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3766 break;
3767 case 0x53: /* fcmpq, V9 %fcc */
3768 CHECK_FPU_FEATURE(dc, FLOAT128);
3769 gen_op_load_fpr_QT0(QFPREG(rs1));
3770 gen_op_load_fpr_QT1(QFPREG(rs2));
3771 gen_op_fcmpq(rd & 3);
3772 break;
3773 case 0x55: /* fcmpes, V9 %fcc */
3774 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3775 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3776 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3777 break;
3778 case 0x56: /* fcmped, V9 %fcc */
3779 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3780 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3781 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3782 break;
3783 case 0x57: /* fcmpeq, V9 %fcc */
3784 CHECK_FPU_FEATURE(dc, FLOAT128);
3785 gen_op_load_fpr_QT0(QFPREG(rs1));
3786 gen_op_load_fpr_QT1(QFPREG(rs2));
3787 gen_op_fcmpeq(rd & 3);
3788 break;
3789 default:
3790 goto illegal_insn;
3792 } else if (xop == 0x2) {
3793 TCGv dst = gen_dest_gpr(dc, rd);
3794 rs1 = GET_FIELD(insn, 13, 17);
3795 if (rs1 == 0) {
3796 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3797 if (IS_IMM) { /* immediate */
3798 simm = GET_FIELDs(insn, 19, 31);
3799 tcg_gen_movi_tl(dst, simm);
3800 gen_store_gpr(dc, rd, dst);
3801 } else { /* register */
3802 rs2 = GET_FIELD(insn, 27, 31);
3803 if (rs2 == 0) {
3804 tcg_gen_movi_tl(dst, 0);
3805 gen_store_gpr(dc, rd, dst);
3806 } else {
3807 cpu_src2 = gen_load_gpr(dc, rs2);
3808 gen_store_gpr(dc, rd, cpu_src2);
3811 } else {
3812 cpu_src1 = get_src1(dc, insn);
3813 if (IS_IMM) { /* immediate */
3814 simm = GET_FIELDs(insn, 19, 31);
3815 tcg_gen_ori_tl(dst, cpu_src1, simm);
3816 gen_store_gpr(dc, rd, dst);
3817 } else { /* register */
3818 rs2 = GET_FIELD(insn, 27, 31);
3819 if (rs2 == 0) {
3820 /* mov shortcut: or x, %g0, y -> mov x, y */
3821 gen_store_gpr(dc, rd, cpu_src1);
3822 } else {
3823 cpu_src2 = gen_load_gpr(dc, rs2);
3824 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3825 gen_store_gpr(dc, rd, dst);
3829 #ifdef TARGET_SPARC64
3830 } else if (xop == 0x25) { /* sll, V9 sllx */
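/* Insn bit 12 selects the 64-bit form: sllx/srlx/srax mask the
   shift count to 6 bits, where sll/srl/sra mask it to 5. */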
3831 cpu_src1 = get_src1(dc, insn);
3832 if (IS_IMM) { /* immediate */
3833 simm = GET_FIELDs(insn, 20, 31);
3834 if (insn & (1 << 12)) {
3835 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3836 } else {
3837 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3839 } else { /* register */
3840 rs2 = GET_FIELD(insn, 27, 31);
3841 cpu_src2 = gen_load_gpr(dc, rs2);
3842 cpu_tmp0 = get_temp_tl(dc);
3843 if (insn & (1 << 12)) {
3844 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3845 } else {
3846 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3848 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3850 gen_store_gpr(dc, rd, cpu_dst);
3851 } else if (xop == 0x26) { /* srl, V9 srlx */
3852 cpu_src1 = get_src1(dc, insn);
3853 if (IS_IMM) { /* immediate */
3854 simm = GET_FIELDs(insn, 20, 31);
3855 if (insn & (1 << 12)) {
3856 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3857 } else {
3858 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3859 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3861 } else { /* register */
3862 rs2 = GET_FIELD(insn, 27, 31);
3863 cpu_src2 = gen_load_gpr(dc, rs2);
3864 cpu_tmp0 = get_temp_tl(dc);
3865 if (insn & (1 << 12)) {
3866 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3867 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3868 } else {
3869 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3870 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3871 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3874 gen_store_gpr(dc, rd, cpu_dst);
3875 } else if (xop == 0x27) { /* sra, V9 srax */
3876 cpu_src1 = get_src1(dc, insn);
3877 if (IS_IMM) { /* immediate */
3878 simm = GET_FIELDs(insn, 20, 31);
3879 if (insn & (1 << 12)) {
3880 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3881 } else {
3882 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3883 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3885 } else { /* register */
3886 rs2 = GET_FIELD(insn, 27, 31);
3887 cpu_src2 = gen_load_gpr(dc, rs2);
3888 cpu_tmp0 = get_temp_tl(dc);
3889 if (insn & (1 << 12)) {
3890 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3891 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3892 } else {
3893 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3894 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3895 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3898 gen_store_gpr(dc, rd, cpu_dst);
3899 #endif
3900 } else if (xop < 0x36) {
3901 if (xop < 0x20) {
3902 cpu_src1 = get_src1(dc, insn);
3903 cpu_src2 = get_src2(dc, insn);
3904 switch (xop & ~0x10) {
3905 case 0x0: /* add */
3906 if (xop & 0x10) {
3907 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3908 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3909 dc->cc_op = CC_OP_ADD;
3910 } else {
3911 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3913 break;
3914 case 0x1: /* and */
3915 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3916 if (xop & 0x10) {
3917 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3918 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3919 dc->cc_op = CC_OP_LOGIC;
3921 break;
3922 case 0x2: /* or */
3923 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3924 if (xop & 0x10) {
3925 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3926 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3927 dc->cc_op = CC_OP_LOGIC;
3929 break;
3930 case 0x3: /* xor */
3931 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3932 if (xop & 0x10) {
3933 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3934 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3935 dc->cc_op = CC_OP_LOGIC;
3937 break;
3938 case 0x4: /* sub */
3939 if (xop & 0x10) {
3940 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3941 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3942 dc->cc_op = CC_OP_SUB;
3943 } else {
3944 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3946 break;
3947 case 0x5: /* andn */
3948 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3949 if (xop & 0x10) {
3950 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3951 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3952 dc->cc_op = CC_OP_LOGIC;
3954 break;
3955 case 0x6: /* orn */
3956 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3957 if (xop & 0x10) {
3958 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3959 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3960 dc->cc_op = CC_OP_LOGIC;
3962 break;
3963 case 0x7: /* xorn */
3964 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3965 if (xop & 0x10) {
3966 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3967 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3968 dc->cc_op = CC_OP_LOGIC;
3970 break;
3971 case 0x8: /* addx, V9 addc */
3972 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3973 (xop & 0x10));
3974 break;
3975 #ifdef TARGET_SPARC64
3976 case 0x9: /* V9 mulx */
3977 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3978 break;
3979 #endif
3980 case 0xa: /* umul */
3981 CHECK_IU_FEATURE(dc, MUL);
3982 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3983 if (xop & 0x10) {
3984 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3985 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3986 dc->cc_op = CC_OP_LOGIC;
3988 break;
3989 case 0xb: /* smul */
3990 CHECK_IU_FEATURE(dc, MUL);
3991 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3992 if (xop & 0x10) {
3993 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3994 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3995 dc->cc_op = CC_OP_LOGIC;
3997 break;
3998 case 0xc: /* subx, V9 subc */
3999 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4000 (xop & 0x10));
4001 break;
4002 #ifdef TARGET_SPARC64
4003 case 0xd: /* V9 udivx */
4004 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4005 break;
4006 #endif
4007 case 0xe: /* udiv */
4008 CHECK_IU_FEATURE(dc, DIV);
4009 if (xop & 0x10) {
4010 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4011 cpu_src2);
4012 dc->cc_op = CC_OP_DIV;
4013 } else {
4014 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4015 cpu_src2);
4017 break;
4018 case 0xf: /* sdiv */
4019 CHECK_IU_FEATURE(dc, DIV);
4020 if (xop & 0x10) {
4021 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4022 cpu_src2);
4023 dc->cc_op = CC_OP_DIV;
4024 } else {
4025 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4026 cpu_src2);
4028 break;
4029 default:
4030 goto illegal_insn;
4032 gen_store_gpr(dc, rd, cpu_dst);
4033 } else {
4034 cpu_src1 = get_src1(dc, insn);
4035 cpu_src2 = get_src2(dc, insn);
4036 switch (xop) {
4037 case 0x20: /* taddcc */
4038 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4039 gen_store_gpr(dc, rd, cpu_dst);
4040 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4041 dc->cc_op = CC_OP_TADD;
4042 break;
4043 case 0x21: /* tsubcc */
4044 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4045 gen_store_gpr(dc, rd, cpu_dst);
4046 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4047 dc->cc_op = CC_OP_TSUB;
4048 break;
4049 case 0x22: /* taddcctv */
4050 gen_helper_taddcctv(cpu_dst, cpu_env,
4051 cpu_src1, cpu_src2);
4052 gen_store_gpr(dc, rd, cpu_dst);
4053 dc->cc_op = CC_OP_TADDTV;
4054 break;
4055 case 0x23: /* tsubcctv */
4056 gen_helper_tsubcctv(cpu_dst, cpu_env,
4057 cpu_src1, cpu_src2);
4058 gen_store_gpr(dc, rd, cpu_dst);
4059 dc->cc_op = CC_OP_TSUBTV;
4060 break;
4061 case 0x24: /* mulscc */
4062 update_psr(dc);
4063 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4064 gen_store_gpr(dc, rd, cpu_dst);
4065 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4066 dc->cc_op = CC_OP_ADD;
4067 break;
4068 #ifndef TARGET_SPARC64
4069 case 0x25: /* sll */
4070 if (IS_IMM) { /* immediate */
4071 simm = GET_FIELDs(insn, 20, 31);
4072 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4073 } else { /* register */
4074 cpu_tmp0 = get_temp_tl(dc);
4075 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4076 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4078 gen_store_gpr(dc, rd, cpu_dst);
4079 break;
4080 case 0x26: /* srl */
4081 if (IS_IMM) { /* immediate */
4082 simm = GET_FIELDs(insn, 20, 31);
4083 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4084 } else { /* register */
4085 cpu_tmp0 = get_temp_tl(dc);
4086 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4087 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4089 gen_store_gpr(dc, rd, cpu_dst);
4090 break;
4091 case 0x27: /* sra */
4092 if (IS_IMM) { /* immediate */
4093 simm = GET_FIELDs(insn, 20, 31);
4094 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4095 } else { /* register */
4096 cpu_tmp0 = get_temp_tl(dc);
4097 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4098 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4100 gen_store_gpr(dc, rd, cpu_dst);
4101 break;
4102 #endif
4103 case 0x30:
4105 cpu_tmp0 = get_temp_tl(dc);
4106 switch(rd) {
4107 case 0: /* wry */
4108 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4109 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4110 break;
4111 #ifndef TARGET_SPARC64
4112 case 0x01 ... 0x0f: /* undefined in the
4113 SPARCv8 manual, nop
4114 on the microSPARC
4115 II */
4116 case 0x10 ... 0x1f: /* implementation-dependent
4117 in the SPARCv8
4118 manual, nop on the
4119 microSPARC II */
4120 if ((rd == 0x13) && (dc->def->features &
4121 CPU_FEATURE_POWERDOWN)) {
4122 /* LEON3 power-down */
4123 save_state(dc);
4124 gen_helper_power_down(cpu_env);
4126 break;
4127 #else
4128 case 0x2: /* V9 wrccr */
4129 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4130 gen_helper_wrccr(cpu_env, cpu_tmp0);
4131 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4132 dc->cc_op = CC_OP_FLAGS;
4133 break;
4134 case 0x3: /* V9 wrasi */
4135 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4136 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4137 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4138 offsetof(CPUSPARCState, asi));
4139 /* End TB to notice changed ASI. */
4140 save_state(dc);
4141 gen_op_next_insn();
4142 tcg_gen_exit_tb(0);
4143 dc->is_br = 1;
4144 break;
4145 case 0x6: /* V9 wrfprs */
4146 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4147 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4148 dc->fprs_dirty = 0;
4149 save_state(dc);
4150 gen_op_next_insn();
4151 tcg_gen_exit_tb(0);
4152 dc->is_br = 1;
4153 break;
4154 case 0xf: /* V9 sir, nop if user */
4155 #if !defined(CONFIG_USER_ONLY)
4156 if (supervisor(dc)) {
4157 ; // XXX
4159 #endif
4160 break;
4161 case 0x13: /* Graphics Status */
4162 if (gen_trap_ifnofpu(dc)) {
4163 goto jmp_insn;
4165 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4166 break;
4167 case 0x14: /* Softint set */
4168 if (!supervisor(dc))
4169 goto illegal_insn;
4170 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4171 gen_helper_set_softint(cpu_env, cpu_tmp0);
4172 break;
4173 case 0x15: /* Softint clear */
4174 if (!supervisor(dc))
4175 goto illegal_insn;
4176 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4177 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4178 break;
4179 case 0x16: /* Softint write */
4180 if (!supervisor(dc))
4181 goto illegal_insn;
4182 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4183 gen_helper_write_softint(cpu_env, cpu_tmp0);
4184 break;
4185 case 0x17: /* Tick compare */
4186 #if !defined(CONFIG_USER_ONLY)
4187 if (!supervisor(dc))
4188 goto illegal_insn;
4189 #endif
4191 TCGv_ptr r_tickptr;
4193 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4194 cpu_src2);
4195 r_tickptr = tcg_temp_new_ptr();
4196 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4197 offsetof(CPUSPARCState, tick));
4198 gen_helper_tick_set_limit(r_tickptr,
4199 cpu_tick_cmpr);
4200 tcg_temp_free_ptr(r_tickptr);
4202 break;
4203 case 0x18: /* System tick */
4204 #if !defined(CONFIG_USER_ONLY)
4205 if (!supervisor(dc))
4206 goto illegal_insn;
4207 #endif
4209 TCGv_ptr r_tickptr;
4211 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4212 cpu_src2);
4213 r_tickptr = tcg_temp_new_ptr();
4214 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4215 offsetof(CPUSPARCState, stick));
4216 gen_helper_tick_set_count(r_tickptr,
4217 cpu_tmp0);
4218 tcg_temp_free_ptr(r_tickptr);
4220 break;
4221 case 0x19: /* System tick compare */
4222 #if !defined(CONFIG_USER_ONLY)
4223 if (!supervisor(dc))
4224 goto illegal_insn;
4225 #endif
4227 TCGv_ptr r_tickptr;
4229 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4230 cpu_src2);
4231 r_tickptr = tcg_temp_new_ptr();
4232 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4233 offsetof(CPUSPARCState, stick));
4234 gen_helper_tick_set_limit(r_tickptr,
4235 cpu_stick_cmpr);
4236 tcg_temp_free_ptr(r_tickptr);
4238 break;
4240 case 0x10: /* Performance Control */
4241 case 0x11: /* Performance Instrumentation
4242 Counter */
4243 case 0x12: /* Dispatch Control */
4244 #endif
4245 default:
4246 goto illegal_insn;
4249 break;
4250 #if !defined(CONFIG_USER_ONLY)
4251 case 0x31: /* wrpsr, V9 saved, restored */
4253 if (!supervisor(dc))
4254 goto priv_insn;
4255 #ifdef TARGET_SPARC64
4256 switch (rd) {
4257 case 0:
4258 gen_helper_saved(cpu_env);
4259 break;
4260 case 1:
4261 gen_helper_restored(cpu_env);
4262 break;
4263 case 2: /* UA2005 allclean */
4264 case 3: /* UA2005 otherw */
4265 case 4: /* UA2005 normalw */
4266 case 5: /* UA2005 invalw */
4267 // XXX
4268 default:
4269 goto illegal_insn;
4271 #else
4272 cpu_tmp0 = get_temp_tl(dc);
4273 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4274 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4275 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4276 dc->cc_op = CC_OP_FLAGS;
4277 save_state(dc);
4278 gen_op_next_insn();
4279 tcg_gen_exit_tb(0);
4280 dc->is_br = 1;
4281 #endif
4283 break;
4284 case 0x32: /* wrwim, V9 wrpr */
4286 if (!supervisor(dc))
4287 goto priv_insn;
4288 cpu_tmp0 = get_temp_tl(dc);
4289 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4290 #ifdef TARGET_SPARC64
4291 switch (rd) {
4292 case 0: // tpc
4294 TCGv_ptr r_tsptr;
4296 r_tsptr = tcg_temp_new_ptr();
4297 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4298 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4299 offsetof(trap_state, tpc));
4300 tcg_temp_free_ptr(r_tsptr);
4302 break;
4303 case 1: // tnpc
4305 TCGv_ptr r_tsptr;
4307 r_tsptr = tcg_temp_new_ptr();
4308 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4309 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4310 offsetof(trap_state, tnpc));
4311 tcg_temp_free_ptr(r_tsptr);
4313 break;
4314 case 2: // tstate
4316 TCGv_ptr r_tsptr;
4318 r_tsptr = tcg_temp_new_ptr();
4319 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4320 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4321 offsetof(trap_state,
4322 tstate));
4323 tcg_temp_free_ptr(r_tsptr);
4325 break;
4326 case 3: // tt
4328 TCGv_ptr r_tsptr;
4330 r_tsptr = tcg_temp_new_ptr();
4331 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4332 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4333 offsetof(trap_state, tt));
4334 tcg_temp_free_ptr(r_tsptr);
4336 break;
4337 case 4: // tick
4339 TCGv_ptr r_tickptr;
4341 r_tickptr = tcg_temp_new_ptr();
4342 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4343 offsetof(CPUSPARCState, tick));
4344 gen_helper_tick_set_count(r_tickptr,
4345 cpu_tmp0);
4346 tcg_temp_free_ptr(r_tickptr);
4348 break;
4349 case 5: // tba
4350 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4351 break;
4352 case 6: // pstate
4353 save_state(dc);
4354 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4355 dc->npc = DYNAMIC_PC;
4356 break;
4357 case 7: // tl
4358 save_state(dc);
4359 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4360 offsetof(CPUSPARCState, tl));
4361 dc->npc = DYNAMIC_PC;
4362 break;
4363 case 8: // pil
4364 gen_helper_wrpil(cpu_env, cpu_tmp0);
4365 break;
4366 case 9: // cwp
4367 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4368 break;
4369 case 10: // cansave
4370 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4371 offsetof(CPUSPARCState,
4372 cansave));
4373 break;
4374 case 11: // canrestore
4375 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4376 offsetof(CPUSPARCState,
4377 canrestore));
4378 break;
4379 case 12: // cleanwin
4380 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4381 offsetof(CPUSPARCState,
4382 cleanwin));
4383 break;
4384 case 13: // otherwin
4385 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4386 offsetof(CPUSPARCState,
4387 otherwin));
4388 break;
4389 case 14: // wstate
4390 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4391 offsetof(CPUSPARCState,
4392 wstate));
4393 break;
4394 case 16: // UA2005 gl
4395 CHECK_IU_FEATURE(dc, GL);
4396 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4397 offsetof(CPUSPARCState, gl));
4398 break;
4399 case 26: // UA2005 strand status
4400 CHECK_IU_FEATURE(dc, HYPV);
4401 if (!hypervisor(dc))
4402 goto priv_insn;
4403 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4404 break;
4405 default:
4406 goto illegal_insn;
4408 #else
4409 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4410 if (dc->def->nwindows != 32) {
4411 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4412 (1 << dc->def->nwindows) - 1);
4414 #endif
4416 break;
4417 case 0x33: /* wrtbr, UA2005 wrhpr */
4419 #ifndef TARGET_SPARC64
4420 if (!supervisor(dc))
4421 goto priv_insn;
4422 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4423 #else
4424 CHECK_IU_FEATURE(dc, HYPV);
4425 if (!hypervisor(dc))
4426 goto priv_insn;
4427 cpu_tmp0 = get_temp_tl(dc);
4428 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4429 switch (rd) {
4430 case 0: // hpstate
4431 // XXX gen_op_wrhpstate();
4432 save_state(dc);
4433 gen_op_next_insn();
4434 tcg_gen_exit_tb(0);
4435 dc->is_br = 1;
4436 break;
4437 case 1: // htstate
4438 // XXX gen_op_wrhtstate();
4439 break;
4440 case 3: // hintp
4441 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4442 break;
4443 case 5: // htba
4444 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4445 break;
4446 case 31: // hstick_cmpr
4448 TCGv_ptr r_tickptr;
4450 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4451 r_tickptr = tcg_temp_new_ptr();
4452 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4453 offsetof(CPUSPARCState, hstick));
4454 gen_helper_tick_set_limit(r_tickptr,
4455 cpu_hstick_cmpr);
4456 tcg_temp_free_ptr(r_tickptr);
4458 break;
4459 case 6: // hver readonly
4460 default:
4461 goto illegal_insn;
4463 #endif
4465 break;
4466 #endif
4467 #ifdef TARGET_SPARC64
4468 case 0x2c: /* V9 movcc */
4470 int cc = GET_FIELD_SP(insn, 11, 12);
4471 int cond = GET_FIELD_SP(insn, 14, 17);
4472 DisasCompare cmp;
4473 TCGv dst;
4475 if (insn & (1 << 18)) {
4476 if (cc == 0) {
4477 gen_compare(&cmp, 0, cond, dc);
4478 } else if (cc == 2) {
4479 gen_compare(&cmp, 1, cond, dc);
4480 } else {
4481 goto illegal_insn;
4483 } else {
4484 gen_fcompare(&cmp, cc, cond);
4487 /* The get_src2 above loaded the normal 13-bit
4488 immediate field, not the 11-bit field we have
4489 in movcc. But it did handle the reg case. */
4490 if (IS_IMM) {
4491 simm = GET_FIELD_SPs(insn, 0, 10);
4492 tcg_gen_movi_tl(cpu_src2, simm);
4495 dst = gen_load_gpr(dc, rd);
4496 tcg_gen_movcond_tl(cmp.cond, dst,
4497 cmp.c1, cmp.c2,
4498 cpu_src2, dst);
4499 free_compare(&cmp);
4500 gen_store_gpr(dc, rd, dst);
4501 break;
4503 case 0x2d: /* V9 sdivx */
4504 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4505 gen_store_gpr(dc, rd, cpu_dst);
4506 break;
4507 case 0x2e: /* V9 popc */
4508 gen_helper_popc(cpu_dst, cpu_src2);
4509 gen_store_gpr(dc, rd, cpu_dst);
4510 break;
4511 case 0x2f: /* V9 movr */
4513 int cond = GET_FIELD_SP(insn, 10, 12);
4514 DisasCompare cmp;
4515 TCGv dst;
4517 gen_compare_reg(&cmp, cond, cpu_src1);
4519 /* The get_src2 above loaded the normal 13-bit
4520 immediate field, not the 10-bit field we have
4521 in movr. But it did handle the reg case. */
4522 if (IS_IMM) {
4523 simm = GET_FIELD_SPs(insn, 0, 9);
4524 tcg_gen_movi_tl(cpu_src2, simm);
4525 }
4527 dst = gen_load_gpr(dc, rd);
4528 tcg_gen_movcond_tl(cmp.cond, dst,
4529 cmp.c1, cmp.c2,
4530 cpu_src2, dst);
4531 free_compare(&cmp);
4532 gen_store_gpr(dc, rd, dst);
4533 break;
4534 }
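/* movr tests a register rather than the condition codes: its 3-bit
   rcond field selects Z, LEZ, LZ, NZ, GZ or GEZ on rs1, which is why
   gen_compare_reg() above is handed cpu_src1 instead of cc state. */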
4535 #endif
4536 default:
4537 goto illegal_insn;
4538 }
4539 }
4540 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4541 #ifdef TARGET_SPARC64
4542 int opf = GET_FIELD_SP(insn, 5, 13);
4543 rs1 = GET_FIELD(insn, 13, 17);
4544 rs2 = GET_FIELD(insn, 27, 31);
4545 if (gen_trap_ifnofpu(dc)) {
4546 goto jmp_insn;
4547 }
4549 switch (opf) {
4550 case 0x000: /* VIS I edge8cc */
4551 CHECK_FPU_FEATURE(dc, VIS1);
4552 cpu_src1 = gen_load_gpr(dc, rs1);
4553 cpu_src2 = gen_load_gpr(dc, rs2);
4554 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4555 gen_store_gpr(dc, rd, cpu_dst);
4556 break;
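/* The edge* cases all share gen_edge(); the trailing arguments select
   the element width in bits (8/16/32), whether the condition codes
   are updated (the VIS I "cc" forms vs. the VIS II "n" forms), and
   whether the little-endian ("l") mask layout is wanted. */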
4557 case 0x001: /* VIS II edge8n */
4558 CHECK_FPU_FEATURE(dc, VIS2);
4559 cpu_src1 = gen_load_gpr(dc, rs1);
4560 cpu_src2 = gen_load_gpr(dc, rs2);
4561 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4562 gen_store_gpr(dc, rd, cpu_dst);
4563 break;
4564 case 0x002: /* VIS I edge8lcc */
4565 CHECK_FPU_FEATURE(dc, VIS1);
4566 cpu_src1 = gen_load_gpr(dc, rs1);
4567 cpu_src2 = gen_load_gpr(dc, rs2);
4568 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4569 gen_store_gpr(dc, rd, cpu_dst);
4570 break;
4571 case 0x003: /* VIS II edge8ln */
4572 CHECK_FPU_FEATURE(dc, VIS2);
4573 cpu_src1 = gen_load_gpr(dc, rs1);
4574 cpu_src2 = gen_load_gpr(dc, rs2);
4575 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4576 gen_store_gpr(dc, rd, cpu_dst);
4577 break;
4578 case 0x004: /* VIS I edge16cc */
4579 CHECK_FPU_FEATURE(dc, VIS1);
4580 cpu_src1 = gen_load_gpr(dc, rs1);
4581 cpu_src2 = gen_load_gpr(dc, rs2);
4582 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4583 gen_store_gpr(dc, rd, cpu_dst);
4584 break;
4585 case 0x005: /* VIS II edge16n */
4586 CHECK_FPU_FEATURE(dc, VIS2);
4587 cpu_src1 = gen_load_gpr(dc, rs1);
4588 cpu_src2 = gen_load_gpr(dc, rs2);
4589 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4590 gen_store_gpr(dc, rd, cpu_dst);
4591 break;
4592 case 0x006: /* VIS I edge16lcc */
4593 CHECK_FPU_FEATURE(dc, VIS1);
4594 cpu_src1 = gen_load_gpr(dc, rs1);
4595 cpu_src2 = gen_load_gpr(dc, rs2);
4596 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4597 gen_store_gpr(dc, rd, cpu_dst);
4598 break;
4599 case 0x007: /* VIS II edge16ln */
4600 CHECK_FPU_FEATURE(dc, VIS2);
4601 cpu_src1 = gen_load_gpr(dc, rs1);
4602 cpu_src2 = gen_load_gpr(dc, rs2);
4603 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4604 gen_store_gpr(dc, rd, cpu_dst);
4605 break;
4606 case 0x008: /* VIS I edge32cc */
4607 CHECK_FPU_FEATURE(dc, VIS1);
4608 cpu_src1 = gen_load_gpr(dc, rs1);
4609 cpu_src2 = gen_load_gpr(dc, rs2);
4610 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4611 gen_store_gpr(dc, rd, cpu_dst);
4612 break;
4613 case 0x009: /* VIS II edge32n */
4614 CHECK_FPU_FEATURE(dc, VIS2);
4615 cpu_src1 = gen_load_gpr(dc, rs1);
4616 cpu_src2 = gen_load_gpr(dc, rs2);
4617 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4618 gen_store_gpr(dc, rd, cpu_dst);
4619 break;
4620 case 0x00a: /* VIS I edge32lcc */
4621 CHECK_FPU_FEATURE(dc, VIS1);
4622 cpu_src1 = gen_load_gpr(dc, rs1);
4623 cpu_src2 = gen_load_gpr(dc, rs2);
4624 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4625 gen_store_gpr(dc, rd, cpu_dst);
4626 break;
4627 case 0x00b: /* VIS II edge32ln */
4628 CHECK_FPU_FEATURE(dc, VIS2);
4629 cpu_src1 = gen_load_gpr(dc, rs1);
4630 cpu_src2 = gen_load_gpr(dc, rs2);
4631 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4632 gen_store_gpr(dc, rd, cpu_dst);
4633 break;
4634 case 0x010: /* VIS I array8 */
4635 CHECK_FPU_FEATURE(dc, VIS1);
4636 cpu_src1 = gen_load_gpr(dc, rs1);
4637 cpu_src2 = gen_load_gpr(dc, rs2);
4638 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4639 gen_store_gpr(dc, rd, cpu_dst);
4640 break;
4641 case 0x012: /* VIS I array16 */
4642 CHECK_FPU_FEATURE(dc, VIS1);
4643 cpu_src1 = gen_load_gpr(dc, rs1);
4644 cpu_src2 = gen_load_gpr(dc, rs2);
4645 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4646 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4647 gen_store_gpr(dc, rd, cpu_dst);
4648 break;
4649 case 0x014: /* VIS I array32 */
4650 CHECK_FPU_FEATURE(dc, VIS1);
4651 cpu_src1 = gen_load_gpr(dc, rs1);
4652 cpu_src2 = gen_load_gpr(dc, rs2);
4653 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4654 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4655 gen_store_gpr(dc, rd, cpu_dst);
4656 break;
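/* array16 and array32 reuse the array8 helper: the blocked-address
   bit permutation is the same for every element size, so the result
   only needs to be scaled by the element size (<< 1 or << 2). */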
4657 case 0x018: /* VIS I alignaddr */
4658 CHECK_FPU_FEATURE(dc, VIS1);
4659 cpu_src1 = gen_load_gpr(dc, rs1);
4660 cpu_src2 = gen_load_gpr(dc, rs2);
4661 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4662 gen_store_gpr(dc, rd, cpu_dst);
4663 break;
4664 case 0x01a: /* VIS I alignaddrl */
4665 CHECK_FPU_FEATURE(dc, VIS1);
4666 cpu_src1 = gen_load_gpr(dc, rs1);
4667 cpu_src2 = gen_load_gpr(dc, rs2);
4668 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4669 gen_store_gpr(dc, rd, cpu_dst);
4670 break;
4671 case 0x019: /* VIS II bmask */
4672 CHECK_FPU_FEATURE(dc, VIS2);
4673 cpu_src1 = gen_load_gpr(dc, rs1);
4674 cpu_src2 = gen_load_gpr(dc, rs2);
4675 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4676 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4677 gen_store_gpr(dc, rd, cpu_dst);
4678 break;
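/* bmask computes the byte-shuffle mask for a later bshuffle: the low
   bits of the sum land in rd as usual, and the same value is
   deposited into the GSR mask field (bits 63:32 of %gsr), where
   bshuffle will read it. */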
4679 case 0x020: /* VIS I fcmple16 */
4680 CHECK_FPU_FEATURE(dc, VIS1);
4681 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4682 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4683 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4684 gen_store_gpr(dc, rd, cpu_dst);
4685 break;
4686 case 0x022: /* VIS I fcmpne16 */
4687 CHECK_FPU_FEATURE(dc, VIS1);
4688 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4689 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4690 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4691 gen_store_gpr(dc, rd, cpu_dst);
4692 break;
4693 case 0x024: /* VIS I fcmple32 */
4694 CHECK_FPU_FEATURE(dc, VIS1);
4695 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4696 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4697 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4698 gen_store_gpr(dc, rd, cpu_dst);
4699 break;
4700 case 0x026: /* VIS I fcmpne32 */
4701 CHECK_FPU_FEATURE(dc, VIS1);
4702 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4703 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4704 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4705 gen_store_gpr(dc, rd, cpu_dst);
4706 break;
4707 case 0x028: /* VIS I fcmpgt16 */
4708 CHECK_FPU_FEATURE(dc, VIS1);
4709 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4710 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4711 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4712 gen_store_gpr(dc, rd, cpu_dst);
4713 break;
4714 case 0x02a: /* VIS I fcmpeq16 */
4715 CHECK_FPU_FEATURE(dc, VIS1);
4716 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4717 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4718 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4719 gen_store_gpr(dc, rd, cpu_dst);
4720 break;
4721 case 0x02c: /* VIS I fcmpgt32 */
4722 CHECK_FPU_FEATURE(dc, VIS1);
4723 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4724 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4725 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4726 gen_store_gpr(dc, rd, cpu_dst);
4727 break;
4728 case 0x02e: /* VIS I fcmpeq32 */
4729 CHECK_FPU_FEATURE(dc, VIS1);
4730 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4731 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4732 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4733 gen_store_gpr(dc, rd, cpu_dst);
4734 break;
4735 case 0x031: /* VIS I fmul8x16 */
4736 CHECK_FPU_FEATURE(dc, VIS1);
4737 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4738 break;
4739 case 0x033: /* VIS I fmul8x16au */
4740 CHECK_FPU_FEATURE(dc, VIS1);
4741 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4742 break;
4743 case 0x035: /* VIS I fmul8x16al */
4744 CHECK_FPU_FEATURE(dc, VIS1);
4745 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4746 break;
4747 case 0x036: /* VIS I fmul8sux16 */
4748 CHECK_FPU_FEATURE(dc, VIS1);
4749 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4750 break;
4751 case 0x037: /* VIS I fmul8ulx16 */
4752 CHECK_FPU_FEATURE(dc, VIS1);
4753 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4754 break;
4755 case 0x038: /* VIS I fmuld8sux16 */
4756 CHECK_FPU_FEATURE(dc, VIS1);
4757 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4758 break;
4759 case 0x039: /* VIS I fmuld8ulx16 */
4760 CHECK_FPU_FEATURE(dc, VIS1);
4761 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4762 break;
4763 case 0x03a: /* VIS I fpack32 */
4764 CHECK_FPU_FEATURE(dc, VIS1);
4765 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4766 break;
4767 case 0x03b: /* VIS I fpack16 */
4768 CHECK_FPU_FEATURE(dc, VIS1);
4769 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4770 cpu_dst_32 = gen_dest_fpr_F(dc);
4771 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4772 gen_store_fpr_F(dc, rd, cpu_dst_32);
4773 break;
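/* The pack helpers are passed cpu_gsr because the GSR scale factor
   field controls how each fixed-point input is shifted before being
   clipped and truncated to the narrower output format. */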
4774 case 0x03d: /* VIS I fpackfix */
4775 CHECK_FPU_FEATURE(dc, VIS1);
4776 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4777 cpu_dst_32 = gen_dest_fpr_F(dc);
4778 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4779 gen_store_fpr_F(dc, rd, cpu_dst_32);
4780 break;
4781 case 0x03e: /* VIS I pdist */
4782 CHECK_FPU_FEATURE(dc, VIS1);
4783 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4784 break;
4785 case 0x048: /* VIS I faligndata */
4786 CHECK_FPU_FEATURE(dc, VIS1);
4787 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4788 break;
4789 case 0x04b: /* VIS I fpmerge */
4790 CHECK_FPU_FEATURE(dc, VIS1);
4791 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4792 break;
4793 case 0x04c: /* VIS II bshuffle */
4794 CHECK_FPU_FEATURE(dc, VIS2);
4795 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4796 break;
4797 case 0x04d: /* VIS I fexpand */
4798 CHECK_FPU_FEATURE(dc, VIS1);
4799 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4800 break;
4801 case 0x050: /* VIS I fpadd16 */
4802 CHECK_FPU_FEATURE(dc, VIS1);
4803 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4804 break;
4805 case 0x051: /* VIS I fpadd16s */
4806 CHECK_FPU_FEATURE(dc, VIS1);
4807 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4808 break;
4809 case 0x052: /* VIS I fpadd32 */
4810 CHECK_FPU_FEATURE(dc, VIS1);
4811 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4812 break;
4813 case 0x053: /* VIS I fpadd32s */
4814 CHECK_FPU_FEATURE(dc, VIS1);
4815 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4816 break;
4817 case 0x054: /* VIS I fpsub16 */
4818 CHECK_FPU_FEATURE(dc, VIS1);
4819 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4820 break;
4821 case 0x055: /* VIS I fpsub16s */
4822 CHECK_FPU_FEATURE(dc, VIS1);
4823 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4824 break;
4825 case 0x056: /* VIS I fpsub32 */
4826 CHECK_FPU_FEATURE(dc, VIS1);
4827 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4828 break;
4829 case 0x057: /* VIS I fpsub32s */
4830 CHECK_FPU_FEATURE(dc, VIS1);
4831 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4832 break;
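/* fpadd32s/fpsub32s can use tcg_gen_add_i32/tcg_gen_sub_i32 directly:
   with a single 32-bit lane there is no partitioned arithmetic left
   to emulate, unlike the 16-bit variants, which need helpers. */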
4833 case 0x060: /* VIS I fzero */
4834 CHECK_FPU_FEATURE(dc, VIS1);
4835 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4836 tcg_gen_movi_i64(cpu_dst_64, 0);
4837 gen_store_fpr_D(dc, rd, cpu_dst_64);
4838 break;
4839 case 0x061: /* VIS I fzeros */
4840 CHECK_FPU_FEATURE(dc, VIS1);
4841 cpu_dst_32 = gen_dest_fpr_F(dc);
4842 tcg_gen_movi_i32(cpu_dst_32, 0);
4843 gen_store_fpr_F(dc, rd, cpu_dst_32);
4844 break;
4845 case 0x062: /* VIS I fnor */
4846 CHECK_FPU_FEATURE(dc, VIS1);
4847 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4848 break;
4849 case 0x063: /* VIS I fnors */
4850 CHECK_FPU_FEATURE(dc, VIS1);
4851 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4852 break;
4853 case 0x064: /* VIS I fandnot2 */
4854 CHECK_FPU_FEATURE(dc, VIS1);
4855 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4856 break;
4857 case 0x065: /* VIS I fandnot2s */
4858 CHECK_FPU_FEATURE(dc, VIS1);
4859 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4860 break;
4861 case 0x066: /* VIS I fnot2 */
4862 CHECK_FPU_FEATURE(dc, VIS1);
4863 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4864 break;
4865 case 0x067: /* VIS I fnot2s */
4866 CHECK_FPU_FEATURE(dc, VIS1);
4867 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4868 break;
4869 case 0x068: /* VIS I fandnot1 */
4870 CHECK_FPU_FEATURE(dc, VIS1);
4871 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4872 break;
4873 case 0x069: /* VIS I fandnot1s */
4874 CHECK_FPU_FEATURE(dc, VIS1);
4875 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4876 break;
4877 case 0x06a: /* VIS I fnot1 */
4878 CHECK_FPU_FEATURE(dc, VIS1);
4879 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4880 break;
4881 case 0x06b: /* VIS I fnot1s */
4882 CHECK_FPU_FEATURE(dc, VIS1);
4883 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
4884 break;
4885 case 0x06c: /* VIS I fxor */
4886 CHECK_FPU_FEATURE(dc, VIS1);
4887 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4888 break;
4889 case 0x06d: /* VIS I fxors */
4890 CHECK_FPU_FEATURE(dc, VIS1);
4891 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4892 break;
4893 case 0x06e: /* VIS I fnand */
4894 CHECK_FPU_FEATURE(dc, VIS1);
4895 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4896 break;
4897 case 0x06f: /* VIS I fnands */
4898 CHECK_FPU_FEATURE(dc, VIS1);
4899 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4900 break;
4901 case 0x070: /* VIS I fand */
4902 CHECK_FPU_FEATURE(dc, VIS1);
4903 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4904 break;
4905 case 0x071: /* VIS I fands */
4906 CHECK_FPU_FEATURE(dc, VIS1);
4907 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4908 break;
4909 case 0x072: /* VIS I fxnor */
4910 CHECK_FPU_FEATURE(dc, VIS1);
4911 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4912 break;
4913 case 0x073: /* VIS I fxnors */
4914 CHECK_FPU_FEATURE(dc, VIS1);
4915 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4916 break;
4917 case 0x074: /* VIS I fsrc1 */
4918 CHECK_FPU_FEATURE(dc, VIS1);
4919 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4920 gen_store_fpr_D(dc, rd, cpu_src1_64);
4921 break;
4922 case 0x075: /* VIS I fsrc1s */
4923 CHECK_FPU_FEATURE(dc, VIS1);
4924 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4925 gen_store_fpr_F(dc, rd, cpu_src1_32);
4926 break;
4927 case 0x076: /* VIS I fornot2 */
4928 CHECK_FPU_FEATURE(dc, VIS1);
4929 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4930 break;
4931 case 0x077: /* VIS I fornot2s */
4932 CHECK_FPU_FEATURE(dc, VIS1);
4933 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4934 break;
4935 case 0x078: /* VIS I fsrc2 */
4936 CHECK_FPU_FEATURE(dc, VIS1);
4937 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4938 gen_store_fpr_D(dc, rd, cpu_src1_64);
4939 break;
4940 case 0x079: /* VIS I fsrc2s */
4941 CHECK_FPU_FEATURE(dc, VIS1);
4942 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4943 gen_store_fpr_F(dc, rd, cpu_src1_32);
4944 break;
4945 case 0x07a: /* VIS I fornot1 */
4946 CHECK_FPU_FEATURE(dc, VIS1);
4947 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4948 break;
4949 case 0x07b: /* VIS I fornot1s */
4950 CHECK_FPU_FEATURE(dc, VIS1);
4951 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4952 break;
4953 case 0x07c: /* VIS I for */
4954 CHECK_FPU_FEATURE(dc, VIS1);
4955 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4956 break;
4957 case 0x07d: /* VIS I fors */
4958 CHECK_FPU_FEATURE(dc, VIS1);
4959 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4960 break;
4961 case 0x07e: /* VIS I fone */
4962 CHECK_FPU_FEATURE(dc, VIS1);
4963 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4964 tcg_gen_movi_i64(cpu_dst_64, -1);
4965 gen_store_fpr_D(dc, rd, cpu_dst_64);
4966 break;
4967 case 0x07f: /* VIS I fones */
4968 CHECK_FPU_FEATURE(dc, VIS1);
4969 cpu_dst_32 = gen_dest_fpr_F(dc);
4970 tcg_gen_movi_i32(cpu_dst_32, -1);
4971 gen_store_fpr_F(dc, rd, cpu_dst_32);
4972 break;
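/* Opcodes 0x060-0x07f cover all 16 boolean functions of two operands
   in 64-bit and 32-bit ("s") flavours; most map one-to-one onto TCG
   primitives (andc, orc, eqv, nand, ...), with fzero/fone supplying
   the two constant rows of the truth table. */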
4973 case 0x080: /* VIS I shutdown */
4974 case 0x081: /* VIS II siam */
4975 // XXX
4976 goto illegal_insn;
4977 default:
4978 goto illegal_insn;
4979 }
4980 #else
4981 goto ncp_insn;
4982 #endif
4983 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4984 #ifdef TARGET_SPARC64
4985 goto illegal_insn;
4986 #else
4987 goto ncp_insn;
4988 #endif
4989 #ifdef TARGET_SPARC64
4990 } else if (xop == 0x39) { /* V9 return */
4991 save_state(dc);
4992 cpu_src1 = get_src1(dc, insn);
4993 cpu_tmp0 = get_temp_tl(dc);
4994 if (IS_IMM) { /* immediate */
4995 simm = GET_FIELDs(insn, 19, 31);
4996 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4997 } else { /* register */
4998 rs2 = GET_FIELD(insn, 27, 31);
4999 if (rs2) {
5000 cpu_src2 = gen_load_gpr(dc, rs2);
5001 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5002 } else {
5003 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5004 }
5005 }
5006 gen_helper_restore(cpu_env);
5007 gen_mov_pc_npc(dc);
5008 gen_check_align(cpu_tmp0, 3);
5009 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5010 dc->npc = DYNAMIC_PC;
5011 goto jmp_insn;
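/* V9 return behaves like "jmpl + restore": the target address is
   computed from the current window's operands first, then
   gen_helper_restore() rotates the window, and the jump leaves
   through npc like any other indirect branch. */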
5012 #endif
5013 } else {
5014 cpu_src1 = get_src1(dc, insn);
5015 cpu_tmp0 = get_temp_tl(dc);
5016 if (IS_IMM) { /* immediate */
5017 simm = GET_FIELDs(insn, 19, 31);
5018 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5019 } else { /* register */
5020 rs2 = GET_FIELD(insn, 27, 31);
5021 if (rs2) {
5022 cpu_src2 = gen_load_gpr(dc, rs2);
5023 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5024 } else {
5025 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5026 }
5027 }
5028 switch (xop) {
5029 case 0x38: /* jmpl */
5030 {
5031 TCGv t = gen_dest_gpr(dc, rd);
5032 tcg_gen_movi_tl(t, dc->pc);
5033 gen_store_gpr(dc, rd, t);
5035 gen_mov_pc_npc(dc);
5036 gen_check_align(cpu_tmp0, 3);
5037 gen_address_mask(dc, cpu_tmp0);
5038 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5039 dc->npc = DYNAMIC_PC;
5040 }
5041 goto jmp_insn;
5042 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5043 case 0x39: /* rett, V9 return */
5044 {
5045 if (!supervisor(dc))
5046 goto priv_insn;
5047 gen_mov_pc_npc(dc);
5048 gen_check_align(cpu_tmp0, 3);
5049 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5050 dc->npc = DYNAMIC_PC;
5051 gen_helper_rett(cpu_env);
5052 }
5053 goto jmp_insn;
5054 #endif
5055 case 0x3b: /* flush */
5056 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5057 goto unimp_flush;
5058 /* nop */
5059 break;
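/* flush can be a nop here: QEMU already invalidates and retranslates
   TBs whenever guest stores touch translated code, so there is no
   instruction-cache coherence left for the guest to maintain. */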
5060 case 0x3c: /* save */
5061 save_state(dc);
5062 gen_helper_save(cpu_env);
5063 gen_store_gpr(dc, rd, cpu_tmp0);
5064 break;
5065 case 0x3d: /* restore */
5066 save_state(dc);
5067 gen_helper_restore(cpu_env);
5068 gen_store_gpr(dc, rd, cpu_tmp0);
5069 break;
5070 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5071 case 0x3e: /* V9 done/retry */
5072 {
5073 switch (rd) {
5074 case 0:
5075 if (!supervisor(dc))
5076 goto priv_insn;
5077 dc->npc = DYNAMIC_PC;
5078 dc->pc = DYNAMIC_PC;
5079 gen_helper_done(cpu_env);
5080 goto jmp_insn;
5081 case 1:
5082 if (!supervisor(dc))
5083 goto priv_insn;
5084 dc->npc = DYNAMIC_PC;
5085 dc->pc = DYNAMIC_PC;
5086 gen_helper_retry(cpu_env);
5087 goto jmp_insn;
5088 default:
5089 goto illegal_insn;
5090 }
5091 }
5092 break;
5093 #endif
5094 default:
5095 goto illegal_insn;
5096 }
5097 }
5098 break;
5099 }
5100 break;
5101 case 3: /* load/store instructions */
5102 {
5103 unsigned int xop = GET_FIELD(insn, 7, 12);
5104 /* ??? gen_address_mask prevents us from using a source
5105 register directly. Always generate a temporary. */
5106 TCGv cpu_addr = get_temp_tl(dc);
5108 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5109 if (xop == 0x3c || xop == 0x3e) {
5110 /* V9 casa/casxa : no offset */
5111 } else if (IS_IMM) { /* immediate */
5112 simm = GET_FIELDs(insn, 19, 31);
5113 if (simm != 0) {
5114 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5115 }
5116 } else { /* register */
5117 rs2 = GET_FIELD(insn, 27, 31);
5118 if (rs2 != 0) {
5119 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
5120 }
5121 }
5122 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
5123 (xop > 0x17 && xop <= 0x1d ) ||
5124 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
5125 TCGv cpu_val = gen_dest_gpr(dc, rd);
5127 switch (xop) {
5128 case 0x0: /* ld, V9 lduw, load unsigned word */
5129 gen_address_mask(dc, cpu_addr);
5130 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
5131 break;
5132 case 0x1: /* ldub, load unsigned byte */
5133 gen_address_mask(dc, cpu_addr);
5134 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
5135 break;
5136 case 0x2: /* lduh, load unsigned halfword */
5137 gen_address_mask(dc, cpu_addr);
5138 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
5139 break;
5140 case 0x3: /* ldd, load double word */
5141 if (rd & 1)
5142 goto illegal_insn;
5143 else {
5144 TCGv_i64 t64;
5146 gen_address_mask(dc, cpu_addr);
5147 t64 = tcg_temp_new_i64();
5148 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
5149 tcg_gen_trunc_i64_tl(cpu_val, t64);
5150 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5151 gen_store_gpr(dc, rd + 1, cpu_val);
5152 tcg_gen_shri_i64(t64, t64, 32);
5153 tcg_gen_trunc_i64_tl(cpu_val, t64);
5154 tcg_temp_free_i64(t64);
5155 tcg_gen_ext32u_tl(cpu_val, cpu_val);
5156 }
5157 break;
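/* ldd fills an even/odd register pair from one 64-bit access: the
   word at the lower address (the high half of the big-endian value)
   reaches rd through the common gen_store_gpr() below, while the
   word at addr + 4 is written to rd + 1 right here. */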
5158 case 0x9: /* ldsb, load signed byte */
5159 gen_address_mask(dc, cpu_addr);
5160 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
5161 break;
5162 case 0xa: /* ldsh, load signed halfword */
5163 gen_address_mask(dc, cpu_addr);
5164 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
5165 break;
5166 case 0xd: /* ldstub -- XXX: should be atomic */
5167 {
5168 TCGv r_const;
5169 TCGv tmp = tcg_temp_new();
5171 gen_address_mask(dc, cpu_addr);
5172 tcg_gen_qemu_ld8u(tmp, cpu_addr, dc->mem_idx);
5173 r_const = tcg_const_tl(0xff);
5174 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
5175 tcg_gen_mov_tl(cpu_val, tmp);
5176 tcg_temp_free(r_const);
5177 tcg_temp_free(tmp);
5178 }
5179 break;
5180 case 0x0f:
5181 /* swap, swap register with memory -- XXX: should be atomic */
5182 {
5183 TCGv t0 = get_temp_tl(dc);
5184 CHECK_IU_FEATURE(dc, SWAP);
5185 cpu_src1 = gen_load_gpr(dc, rd);
5186 gen_address_mask(dc, cpu_addr);
5187 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
5188 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
5189 tcg_gen_mov_tl(cpu_val, t0);
5190 }
5191 break;
5192 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5193 case 0x10: /* lda, V9 lduwa, load word alternate */
5194 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5195 break;
5196 case 0x11: /* lduba, load unsigned byte alternate */
5197 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5198 break;
5199 case 0x12: /* lduha, load unsigned halfword alternate */
5200 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5201 break;
5202 case 0x13: /* ldda, load double word alternate */
5203 if (rd & 1) {
5204 goto illegal_insn;
5205 }
5206 gen_ldda_asi(dc, cpu_addr, insn, rd);
5207 goto skip_move;
5208 case 0x19: /* ldsba, load signed byte alternate */
5209 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
5210 break;
5211 case 0x1a: /* ldsha, load signed halfword alternate */
5212 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
5213 break;
5214 case 0x1d: /* ldstuba -- XXX: should be atomic */
5215 gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
5216 break;
5217 case 0x1f: /* swapa, swap reg with alt. memory -- XXX: should
5218 be atomic */
5219 CHECK_IU_FEATURE(dc, SWAP);
5220 cpu_src1 = gen_load_gpr(dc, rd);
5221 gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
5222 break;
5224 #ifndef TARGET_SPARC64
5225 case 0x30: /* ldc */
5226 case 0x31: /* ldcsr */
5227 case 0x33: /* lddc */
5228 goto ncp_insn;
5229 #endif
5230 #endif
5231 #ifdef TARGET_SPARC64
5232 case 0x08: /* V9 ldsw */
5233 gen_address_mask(dc, cpu_addr);
5234 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
5235 break;
5236 case 0x0b: /* V9 ldx */
5237 gen_address_mask(dc, cpu_addr);
5238 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
5239 break;
5240 case 0x18: /* V9 ldswa */
5241 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
5242 break;
5243 case 0x1b: /* V9 ldxa */
5244 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5245 break;
5246 case 0x2d: /* V9 prefetch, no effect */
5247 goto skip_move;
5248 case 0x30: /* V9 ldfa */
5249 if (gen_trap_ifnofpu(dc)) {
5250 goto jmp_insn;
5251 }
5252 gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
5253 gen_update_fprs_dirty(dc, rd);
5254 goto skip_move;
5255 case 0x33: /* V9 lddfa */
5256 if (gen_trap_ifnofpu(dc)) {
5257 goto jmp_insn;
5258 }
5259 gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5260 gen_update_fprs_dirty(dc, DFPREG(rd));
5261 goto skip_move;
5262 case 0x3d: /* V9 prefetcha, no effect */
5263 goto skip_move;
5264 case 0x32: /* V9 ldqfa */
5265 CHECK_FPU_FEATURE(dc, FLOAT128);
5266 if (gen_trap_ifnofpu(dc)) {
5267 goto jmp_insn;
5268 }
5269 gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5270 gen_update_fprs_dirty(dc, QFPREG(rd));
5271 goto skip_move;
5272 #endif
5273 default:
5274 goto illegal_insn;
5275 }
5276 gen_store_gpr(dc, rd, cpu_val);
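/* All plain integer loads funnel through the gen_store_gpr() above;
   the ldda, FP-ASI and prefetch cases jump to skip_move instead,
   since they either wrote their destinations already or have none. */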
5277 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5278 skip_move: ;
5279 #endif
5280 } else if (xop >= 0x20 && xop < 0x24) {
5281 TCGv t0;
5283 if (gen_trap_ifnofpu(dc)) {
5284 goto jmp_insn;
5285 }
5286 switch (xop) {
5287 case 0x20: /* ldf, load fpreg */
5288 gen_address_mask(dc, cpu_addr);
5289 t0 = get_temp_tl(dc);
5290 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
5291 cpu_dst_32 = gen_dest_fpr_F(dc);
5292 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
5293 gen_store_fpr_F(dc, rd, cpu_dst_32);
5294 break;
5295 case 0x21: /* ldfsr, V9 ldxfsr */
5296 #ifdef TARGET_SPARC64
5297 gen_address_mask(dc, cpu_addr);
5298 if (rd == 1) {
5299 TCGv_i64 t64 = tcg_temp_new_i64();
5300 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
5301 gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
5302 tcg_temp_free_i64(t64);
5303 break;
5304 }
5305 #endif
5306 cpu_dst_32 = get_temp_i32(dc);
5307 t0 = get_temp_tl(dc);
5308 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
5309 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
5310 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
5311 break;
5312 case 0x22: /* ldqf, load quad fpreg */
5313 {
5314 TCGv_i32 r_const;
5316 CHECK_FPU_FEATURE(dc, FLOAT128);
5317 r_const = tcg_const_i32(dc->mem_idx);
5318 gen_address_mask(dc, cpu_addr);
5319 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
5320 tcg_temp_free_i32(r_const);
5321 gen_op_store_QT0_fpr(QFPREG(rd));
5322 gen_update_fprs_dirty(dc, QFPREG(rd));
5323 }
5324 break;
5325 case 0x23: /* lddf, load double fpreg */
5326 gen_address_mask(dc, cpu_addr);
5327 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5328 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
5329 gen_store_fpr_D(dc, rd, cpu_dst_64);
5330 break;
5331 default:
5332 goto illegal_insn;
5333 }
5334 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
5335 xop == 0xe || xop == 0x1e) {
5336 TCGv cpu_val = gen_load_gpr(dc, rd);
5338 switch (xop) {
5339 case 0x4: /* st, store word */
5340 gen_address_mask(dc, cpu_addr);
5341 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
5342 break;
5343 case 0x5: /* stb, store byte */
5344 gen_address_mask(dc, cpu_addr);
5345 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
5346 break;
5347 case 0x6: /* sth, store halfword */
5348 gen_address_mask(dc, cpu_addr);
5349 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
5350 break;
5351 case 0x7: /* std, store double word */
5352 if (rd & 1)
5353 goto illegal_insn;
5354 else {
5355 TCGv_i64 t64;
5356 TCGv lo;
5358 gen_address_mask(dc, cpu_addr);
5359 lo = gen_load_gpr(dc, rd + 1);
5360 t64 = tcg_temp_new_i64();
5361 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
5362 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
5363 tcg_temp_free_i64(t64);
5364 }
5365 break;
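/* std mirrors ldd: rd must be even, and tcg_gen_concat_tl_i64()
   reassembles rd + 1 (low half) with rd (high half) so the pair goes
   out as a single 64-bit big-endian access. */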
5366 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5367 case 0x14: /* sta, V9 stwa, store word alternate */
5368 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
5369 break;
5370 case 0x15: /* stba, store byte alternate */
5371 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
5372 break;
5373 case 0x16: /* stha, store halfword alternate */
5374 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
5375 break;
5376 case 0x17: /* stda, store double word alternate */
5377 if (rd & 1) {
5378 goto illegal_insn;
5379 }
5380 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
5381 break;
5382 #endif
5383 #ifdef TARGET_SPARC64
5384 case 0x0e: /* V9 stx */
5385 gen_address_mask(dc, cpu_addr);
5386 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
5387 break;
5388 case 0x1e: /* V9 stxa */
5389 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
5390 break;
5391 #endif
5392 default:
5393 goto illegal_insn;
5394 }
5395 } else if (xop > 0x23 && xop < 0x28) {
5396 if (gen_trap_ifnofpu(dc)) {
5397 goto jmp_insn;
5398 }
5399 switch (xop) {
5400 case 0x24: /* stf, store fpreg */
5401 {
5402 TCGv t = get_temp_tl(dc);
5403 gen_address_mask(dc, cpu_addr);
5404 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5405 tcg_gen_ext_i32_tl(t, cpu_src1_32);
5406 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5407 }
5408 break;
5409 case 0x25: /* stfsr, V9 stxfsr */
5410 {
5411 #ifdef TARGET_SPARC64
5412 gen_address_mask(dc, cpu_addr);
5413 if (rd == 1) {
5414 tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
5415 break;
5416 }
5417 #endif
5418 tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
5419 }
5420 break;
5421 case 0x26:
5422 #ifdef TARGET_SPARC64
5423 /* V9 stqf, store quad fpreg */
5424 {
5425 TCGv_i32 r_const;
5427 CHECK_FPU_FEATURE(dc, FLOAT128);
5428 gen_op_load_fpr_QT0(QFPREG(rd));
5429 r_const = tcg_const_i32(dc->mem_idx);
5430 gen_address_mask(dc, cpu_addr);
5431 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5432 tcg_temp_free_i32(r_const);
5433 }
5434 break;
5435 #else /* !TARGET_SPARC64 */
5436 /* stdfq, store floating point queue */
5437 #if defined(CONFIG_USER_ONLY)
5438 goto illegal_insn;
5439 #else
5440 if (!supervisor(dc))
5441 goto priv_insn;
5442 if (gen_trap_ifnofpu(dc)) {
5443 goto jmp_insn;
5444 }
5445 goto nfq_insn;
5446 #endif
5447 #endif
5448 case 0x27: /* stdf, store double fpreg */
5449 gen_address_mask(dc, cpu_addr);
5450 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5451 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5452 break;
5453 default:
5454 goto illegal_insn;
5455 }
5456 } else if (xop > 0x33 && xop < 0x3f) {
5457 switch (xop) {
5458 #ifdef TARGET_SPARC64
5459 case 0x34: /* V9 stfa */
5460 if (gen_trap_ifnofpu(dc)) {
5461 goto jmp_insn;
5462 }
5463 gen_stf_asi(dc, cpu_addr, insn, 4, rd);
5464 break;
5465 case 0x36: /* V9 stqfa */
5466 {
5467 CHECK_FPU_FEATURE(dc, FLOAT128);
5468 if (gen_trap_ifnofpu(dc)) {
5469 goto jmp_insn;
5470 }
5471 gen_check_align(cpu_addr, 7);
5472 gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
5473 }
5474 break;
5475 case 0x37: /* V9 stdfa */
5476 if (gen_trap_ifnofpu(dc)) {
5477 goto jmp_insn;
5479 gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
5480 break;
5481 case 0x3e: /* V9 casxa */
5482 rs2 = GET_FIELD(insn, 27, 31);
5483 cpu_src2 = gen_load_gpr(dc, rs2);
5484 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5485 break;
5486 #else
5487 case 0x34: /* stc */
5488 case 0x35: /* stcsr */
5489 case 0x36: /* stdcq */
5490 case 0x37: /* stdc */
5491 goto ncp_insn;
5492 #endif
5493 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5494 case 0x3c: /* V9 or LEON3 casa */
5495 #ifndef TARGET_SPARC64
5496 CHECK_IU_FEATURE(dc, CASA);
5497 #endif
5498 rs2 = GET_FIELD(insn, 27, 31);
5499 cpu_src2 = gen_load_gpr(dc, rs2);
5500 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5501 break;
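/* casa/casxa semantics: the value at [rs1] is compared with rs2 and,
   on a match, rd is stored there; rd always receives the old memory
   value. The CASA feature gate exists for LEON3, which implements
   this V9 instruction as an extension on a V8 pipeline. */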
5502 #endif
5503 default:
5504 goto illegal_insn;
5505 }
5506 } else {
5507 goto illegal_insn;
5508 }
5509 }
5510 break;
5511 }
5512 /* default case for non-jump instructions */
5513 if (dc->npc == DYNAMIC_PC) {
5514 dc->pc = DYNAMIC_PC;
5515 gen_op_next_insn();
5516 } else if (dc->npc == JUMP_PC) {
5517 /* we can do a static jump */
5518 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5519 dc->is_br = 1;
5520 } else {
5521 dc->pc = dc->npc;
5522 dc->npc = dc->npc + 4;
5523 }
5524 jmp_insn:
5525 goto egress;
5526 illegal_insn:
5527 gen_exception(dc, TT_ILL_INSN);
5528 goto egress;
5529 unimp_flush:
5530 gen_exception(dc, TT_UNIMP_FLUSH);
5531 goto egress;
5532 #if !defined(CONFIG_USER_ONLY)
5533 priv_insn:
5534 gen_exception(dc, TT_PRIV_INSN);
5535 goto egress;
5536 #endif
5537 nfpu_insn:
5538 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
5539 goto egress;
5540 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5541 nfq_insn:
5542 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
5543 goto egress;
5544 #endif
5545 #ifndef TARGET_SPARC64
5546 ncp_insn:
5547 gen_exception(dc, TT_NCP_INSN);
5548 goto egress;
5549 #endif
5550 egress:
5551 if (dc->n_t32 != 0) {
5552 int i;
5553 for (i = dc->n_t32 - 1; i >= 0; --i) {
5554 tcg_temp_free_i32(dc->t32[i]);
5555 }
5556 dc->n_t32 = 0;
5557 }
5558 if (dc->n_ttl != 0) {
5559 int i;
5560 for (i = dc->n_ttl - 1; i >= 0; --i) {
5561 tcg_temp_free(dc->ttl[i]);
5562 }
5563 dc->n_ttl = 0;
5564 }
5565 }
5567 void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
5568 {
5569 SPARCCPU *cpu = sparc_env_get_cpu(env);
5570 CPUState *cs = CPU(cpu);
5571 target_ulong pc_start, last_pc;
5572 DisasContext dc1, *dc = &dc1;
5573 int num_insns;
5574 int max_insns;
5575 unsigned int insn;
5577 memset(dc, 0, sizeof(DisasContext));
5578 dc->tb = tb;
5579 pc_start = tb->pc;
5580 dc->pc = pc_start;
5581 last_pc = dc->pc;
5582 dc->npc = (target_ulong) tb->cs_base;
5583 dc->cc_op = CC_OP_DYNAMIC;
5584 dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
5585 dc->def = env->def;
5586 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5587 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5588 dc->singlestep = (cs->singlestep_enabled || singlestep);
5589 #ifdef TARGET_SPARC64
5590 dc->fprs_dirty = 0;
5591 dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5592 #endif
5594 num_insns = 0;
5595 max_insns = tb->cflags & CF_COUNT_MASK;
5596 if (max_insns == 0) {
5597 max_insns = CF_COUNT_MASK;
5598 }
5599 if (max_insns > TCG_MAX_INSNS) {
5600 max_insns = TCG_MAX_INSNS;
5601 }
5603 gen_tb_start(tb);
5604 do {
5605 if (dc->npc & JUMP_PC) {
5606 assert(dc->jump_pc[1] == dc->pc + 4);
5607 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5608 } else {
5609 tcg_gen_insn_start(dc->pc, dc->npc);
5610 }
5611 num_insns++;
5612 last_pc = dc->pc;
5614 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5615 if (dc->pc != pc_start) {
5616 save_state(dc);
5617 }
5618 gen_helper_debug(cpu_env);
5619 tcg_gen_exit_tb(0);
5620 dc->is_br = 1;
5621 goto exit_gen_loop;
5622 }
5624 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5625 gen_io_start();
5626 }
5628 insn = cpu_ldl_code(env, dc->pc);
5630 disas_sparc_insn(dc, insn);
5632 if (dc->is_br)
5633 break;
5634 /* if the next PC is not sequential, stop translation now */
5635 if (dc->pc != (last_pc + 4))
5636 break;
5637 /* if we reach a page boundary, we stop generation so that the
5638 PC of a TT_TFAULT exception is always in the right page */
5639 if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
5640 break;
5641 /* if single step mode, we generate only one instruction and
5642 generate an exception */
5643 if (dc->singlestep) {
5644 break;
5645 }
5646 } while (!tcg_op_buf_full() &&
5647 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
5648 num_insns < max_insns);
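/* A TB therefore ends on: an explicit control transfer (is_br), a
   non-sequential npc, a page crossing (keeping fault PCs in the
   right page), single-stepping, a full TCG op buffer, or the
   max_insns budget. */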
5650 exit_gen_loop:
5651 if (tb->cflags & CF_LAST_IO) {
5652 gen_io_end();
5653 }
5654 if (!dc->is_br) {
5655 if (dc->pc != DYNAMIC_PC &&
5656 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
5657 /* static PC and NPC: we can use direct chaining */
5658 gen_goto_tb(dc, 0, dc->pc, dc->npc);
5659 } else {
5660 if (dc->pc != DYNAMIC_PC) {
5661 tcg_gen_movi_tl(cpu_pc, dc->pc);
5662 }
5663 save_npc(dc);
5664 tcg_gen_exit_tb(0);
5665 }
5666 }
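/* Direct chaining through gen_goto_tb() requires both pc and npc to
   be known at translation time; any dynamic component forces an exit
   to the main loop so the next TB is looked up from the real CPU
   state instead. */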
5667 gen_tb_end(tb, num_insns);
5669 tb->size = last_pc + 4 - pc_start;
5670 tb->icount = num_insns;
5672 #ifdef DEBUG_DISAS
5673 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5674 && qemu_log_in_addr_range(pc_start)) {
5675 qemu_log("--------------\n");
5676 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5677 log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
5678 qemu_log("\n");
5679 }
5680 #endif
5681 }
5683 void gen_intermediate_code_init(CPUSPARCState *env)
5684 {
5685 static int inited;
5686 static const char gregnames[32][4] = {
5687 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5688 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5689 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5690 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5691 };
5692 static const char fregnames[32][4] = {
5693 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5694 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5695 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5696 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5697 };
5699 static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5700 #ifdef TARGET_SPARC64
5701 { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
5702 { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5703 #else
5704 { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
5705 #endif
5706 { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
5707 { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
5708 };
5710 static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5711 #ifdef TARGET_SPARC64
5712 { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5713 { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
5714 { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
5715 { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
5716 "hstick_cmpr" },
5717 { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
5718 { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
5719 { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
5720 { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
5721 { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
5722 #endif
5723 { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5724 { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
5725 { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
5726 { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
5727 { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
5728 { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5729 { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5730 { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5731 #ifndef CONFIG_USER_ONLY
5732 { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5733 #endif
5734 };
5736 unsigned int i;
5738 /* init various static tables */
5739 if (inited) {
5740 return;
5741 }
5742 inited = 1;
5744 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5745 tcg_ctx.tcg_env = cpu_env;
5747 cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
5748 offsetof(CPUSPARCState, regwptr),
5749 "regwptr");
5751 for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5752 *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
5753 }
5755 for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5756 *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
5757 }
5759 TCGV_UNUSED(cpu_regs[0]);
5760 for (i = 1; i < 8; ++i) {
5761 cpu_regs[i] = tcg_global_mem_new(cpu_env,
5762 offsetof(CPUSPARCState, gregs[i]),
5763 gregnames[i]);
5764 }
5766 for (i = 8; i < 32; ++i) {
5767 cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5768 (i - 8) * sizeof(target_ulong),
5769 gregnames[i]);
5770 }
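/* %g0 deliberately gets no TCG global: gen_load_gpr()/gen_store_gpr()
   special-case register 0 so it reads as zero and swallows writes.
   %g1-%g7 live directly in env, while the windowed %o/%l/%i registers
   are reached through regwptr, which tracks the current window. */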
5772 for (i = 0; i < TARGET_DPREGS; i++) {
5773 cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
5774 offsetof(CPUSPARCState, fpr[i]),
5775 fregnames[i]);
5776 }
5777 }
5779 void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
5780 target_ulong *data)
5781 {
5782 target_ulong pc = data[0];
5783 target_ulong npc = data[1];
5785 env->pc = pc;
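/* If an exception was raised in a conditional delay slot, the npc
   recorded at translation time has JUMP_PC set in its low bits: the
   taken target was saved there, so env->cond chooses below between
   that target (npc & ~3) and the fall-through pc + 4. */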
5786 if (npc == DYNAMIC_PC) {
5787 /* dynamic NPC: already stored */
5788 } else if (npc & JUMP_PC) {
5789 /* jump PC: use 'cond' and the jump targets of the translation */
5790 if (env->cond) {
5791 env->npc = npc & ~3;
5792 } else {
5793 env->npc = pc + 4;
5794 }
5795 } else {
5796 env->npc = npc;