/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "tcg-op.h"

#include "exec/helper-gen.h"
#define DEBUG_DISAS

#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

static target_ulong gen_opc_npc[OPC_BUF_SIZE];
static target_ulong gen_opc_jump_pc[2];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
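
/* For example (illustrative instruction word, not from this file): with the
   Bicc word 0x12800003 ("bne" with disp22 == 3), GET_FIELD(insn, 3, 6)
   evaluates to (0x12800003 >> 25) & 0xf == 0x9, the bne condition code;
   do_branch() below extracts the cond field exactly this way.  */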
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
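
/* On SPARC64 the 5-bit double/quad register field reaches registers above
   %f31 by recycling the low bit as bit 5 of the register number: e.g.
   DFPREG(1) == 32, so an odd rd encodes %f32 rather than %f1.  */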
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
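
/* sign_extend() widens an n-bit immediate with a shift pair; e.g. the
   13-bit field 0x1fff sign-extends to -1, since (0x1fff << 19) >> 19
   propagates the top bit back down with an arithmetic right shift.  */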

#define IS_IMM (insn & (1<<13))

static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}

static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}

/* floating point registers moves */
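/* Single-precision registers are packed in pairs into the 64-bit cpu_fpr[]
   slots: the even-numbered single of each pair lives in the upper 32 bits
   and the odd-numbered single in the lower 32 bits, mirroring how %f(2n)
   and %f(2n+1) alias the double register %d(2n).  The load/store helpers
   below encode that convention.  */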
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_trunc_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#else
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif

static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc))
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
#endif
}

static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        TCGv t = get_temp_tl(dc);
        if (reg == 0) {
            tcg_gen_movi_tl(t, 0);
        } else {
            tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
        return t;
    } else {
        return cpu_gregs[reg];
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        if (reg < 8) {
            tcg_gen_mov_tl(cpu_gregs[reg], v);
        } else {
            tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        return get_temp_tl(dc);
    } else {
        return cpu_gregs[reg];
    }
}
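
/* Chain to the next translation block when both pc and npc are known at
   translate time.  tcg_gen_goto_tb() emits a patchable direct jump, and the
   (uintptr_t)tb + tb_num value returned via tcg_gen_exit_tb() tells the
   execution loop which of the TB's two jump slots to link.  Direct jumps
   are only safe within the same guest page (so page-level invalidation can
   unlink them) and are disabled while single-stepping.  */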
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}

// XXX suboptimal
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_op_addi_cc(TCGv dst, TCGv src1, target_long src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    tcg_gen_addi_tl(cpu_cc_dst, cpu_cc_src, src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
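
/* The ADDX/SUBX expanders below avoid a helper call whenever the carry can
   be reconstructed from the last cc-setting operation: after an add the
   carry is (dst < src) via gen_add32_carry32(); after a subtract it is the
   borrow (src1 < src2) via gen_sub32_carry32(); after a logic op it is
   known to be zero.  Only an unknown cc_op falls back to
   gen_helper_compute_C_icc().  */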
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}

static inline void gen_op_subi_cc(TCGv dst, TCGv src1, target_long src2, DisasContext *dc)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_movi_tl(cpu_cc_src2, src2);
    if (src2 == 0) {
        tcg_gen_mov_tl(cpu_cc_dst, src1);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
        dc->cc_op = CC_OP_LOGIC;
    } else {
        tcg_gen_subi_tl(cpu_cc_dst, cpu_cc_src, src2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
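
/* MULScc, the SPARC V8 multiply-step instruction: one iteration of an
   iterative 32x32 multiply.  If the low bit of Y is clear, the addend
   (src2) is replaced by zero; Y then shifts right, taking the low bit of
   src1 into its top bit, and src1 shifts right, taking N ^ V into its top
   bit, before the conditionally-zeroed addend is accumulated.  */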
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
       if (!(env->y & 1))
           T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
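
/* Per the SPARC architecture, UMUL/SMUL also deposit the high 32 bits of
   the 64-bit product in the Y register; on 32-bit targets that falls out
   of mulu2/muls2 above, on 64-bit targets from the explicit shift.  */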

// 1
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
   FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
                                target_ulong pc2, TCGv r_cond)
{
    int l1;

    l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc2, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
}
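
/* While the two possible next-pc values of a conditional delayed branch are
   still undecided, the translator keeps them in dc->jump_pc[] and marks
   dc->npc with the magic JUMP_PC value.  gen_generic_branch() below
   materializes the choice with a movcond on cpu_cond once the real npc is
   finally needed.  */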
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
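
/* gen_compare() tries hard not to materialize the PSR: when the flags were
   produced by subcc, the original operands are still live in cpu_cc_src
   and cpu_cc_src2, so e.g. a "bgu" folds straight into a TCG_COND_GTU
   comparison of those operands via subcc_cond[] below, and logic ops
   compare cpu_cc_dst against zero.  Only an unknown cc_op (or the overflow
   conditions) falls back to computing the flags and testing a boolean.  */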
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
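
/* SPARC branches are delayed: the instruction at npc executes before the
   branch target.  The annul bit (bit 29) cancels that delay slot when a
   conditional branch is untaken, and always cancels it for the static
   "ba,a" and "bn,a" forms; the cond == 0x0 and cond == 0x8 special cases
   below implement exactly those variants.  */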
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}

static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target, dc->npc, cpu_cond);
            dc->is_br = 1;
        } else {
            dc->pc = dc->npc;
            dc->jump_pc[0] = target;
            if (unlikely(dc->npc == DYNAMIC_PC)) {
                dc->jump_pc[1] = DYNAMIC_PC;
                tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
            } else {
                dc->jump_pc[1] = dc->npc + 4;
                dc->npc = JUMP_PC;
            }
        }
    }
}

#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target, dc->npc, cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->jump_pc[0] = target;
        if (unlikely(dc->npc == DYNAMIC_PC)) {
            dc->jump_pc[1] = DYNAMIC_PC;
            tcg_gen_addi_tl(cpu_pc, cpu_npc, 4);
        } else {
            dc->jump_pc[1] = dc->npc + 4;
            dc->npc = JUMP_PC;
        }
    }
}

static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}

#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif
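
/* SPARC V9 provides four floating-point condition-code fields
   (%fcc0-%fcc3), hence the per-fccno helper variants above; pre-V9 SPARC
   has only the single FCC field in the FSR, so the fccno argument is
   simply ignored in the fallback versions.  */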

static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}
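
/* Callers are expected to bail out of the current instruction when this
   returns 1, typically via a pattern like
   "if (gen_trap_ifnofpu(dc)) goto jmp_insn;" in disas_insn(), since the
   generated exception ends the translation block.  */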

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
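
/* Naming convention for the expander family below: the trailing letters
   give the operand widths (F = 32-bit single, D = 64-bit double, Q =
   128-bit quad).  Quad values are staged through the env fields qt0/qt1
   because TCG has no 128-bit value type.  The gen_ne_* variants are used
   for operations that do not raise IEEE exceptions (fneg/fabs, VIS ops
   and the like).  */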

static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

/* asi moves */
#ifdef TARGET_SPARC64
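/* For alternate-space accesses the ASI comes from one of two places: with
   the i bit set (register + simm13 addressing) the value in the %asi
   register is used, otherwise the 8-bit immediate ASI encoded in bits 12:5
   of the instruction (GET_FIELD(insn, 19, 26)) is used.  */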
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif

#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif

static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
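
/* Bit 13 (IS_IMM) selects the second operand of format-3 instructions:
   either a sign-extended 13-bit immediate (GET_FIELDs(insn, 19, 31), so
   e.g. a field of 0x1fff yields -1) or the register named by the low five
   bits of the instruction.  */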

#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_trunc_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_trunc_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
2323 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
2325 TCGv_i32 r_tl = tcg_temp_new_i32();
2327 /* load env->tl into r_tl */
2328 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2330 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2331 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2333 /* calculate offset to current trap state from env->ts, reuse r_tl */
2334 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2335 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2337 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2339 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2340 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2341 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2342 tcg_temp_free_ptr(r_tl_tmp);
2345 tcg_temp_free_i32(r_tl);
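/* On return r_tsptr is a host pointer to the trap_state entry for the
   current trap level; the rdpr/wrpr cases below load and store tpc,
   tnpc, tstate and tt through it.  */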
2348 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2349 int width, bool cc, bool left)
2351 TCGv lo1, lo2, t1, t2;
2352 uint64_t amask, tabl, tabr;
2353 int shift, imask, omask;
2355 if (cc) {
2356 tcg_gen_mov_tl(cpu_cc_src, s1);
2357 tcg_gen_mov_tl(cpu_cc_src2, s2);
2358 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2359 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2360 dc->cc_op = CC_OP_SUB;
2363 /* Theory of operation: there are two tables, left and right (not to
2364 be confused with the left and right versions of the opcode). These
2365 are indexed by the low 3 bits of the inputs. To make things "easy",
2366 these tables are loaded into two constants, TABL and TABR below.
2367 The operation index = (input & imask) << shift calculates the index
2368 into the constant, while val = (table >> index) & omask calculates
2369 the value we're looking for. */
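/* Worked example (width 8, left): for (s1 & 7) == 2 the index is
   2 << 3 = 16 and (tabl >> 16) & 0xff == 0xfc, i.e. the edge mask
   0b11111100 covering bytes 2..7 of the aligned 8-byte word, with
   byte 0 mapping to the mask's most significant bit.  */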
2370 switch (width) {
2371 case 8:
2372 imask = 0x7;
2373 shift = 3;
2374 omask = 0xff;
2375 if (left) {
2376 tabl = 0x80c0e0f0f8fcfeffULL;
2377 tabr = 0xff7f3f1f0f070301ULL;
2378 } else {
2379 tabl = 0x0103070f1f3f7fffULL;
2380 tabr = 0xfffefcf8f0e0c080ULL;
2382 break;
2383 case 16:
2384 imask = 0x6;
2385 shift = 1;
2386 omask = 0xf;
2387 if (left) {
2388 tabl = 0x8cef;
2389 tabr = 0xf731;
2390 } else {
2391 tabl = 0x137f;
2392 tabr = 0xfec8;
2394 break;
2395 case 32:
2396 imask = 0x4;
2397 shift = 0;
2398 omask = 0x3;
2399 if (left) {
2400 tabl = (2 << 2) | 3;
2401 tabr = (3 << 2) | 1;
2402 } else {
2403 tabl = (1 << 2) | 3;
2404 tabr = (3 << 2) | 2;
2406 break;
2407 default:
2408 abort();
2411 lo1 = tcg_temp_new();
2412 lo2 = tcg_temp_new();
2413 tcg_gen_andi_tl(lo1, s1, imask);
2414 tcg_gen_andi_tl(lo2, s2, imask);
2415 tcg_gen_shli_tl(lo1, lo1, shift);
2416 tcg_gen_shli_tl(lo2, lo2, shift);
2418 t1 = tcg_const_tl(tabl);
2419 t2 = tcg_const_tl(tabr);
2420 tcg_gen_shr_tl(lo1, t1, lo1);
2421 tcg_gen_shr_tl(lo2, t2, lo2);
2422 tcg_gen_andi_tl(dst, lo1, omask);
2423 tcg_gen_andi_tl(lo2, lo2, omask);
2425 amask = -8;
2426 if (AM_CHECK(dc)) {
2427 amask &= 0xffffffffULL;
2429 tcg_gen_andi_tl(s1, s1, amask);
2430 tcg_gen_andi_tl(s2, s2, amask);
2432 /* We want to compute
2433 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2434 We've already done dst = lo1, so this reduces to
2435 dst &= (s1 == s2 ? -1 : lo2)
2436 Which we perform by
2437 lo2 |= -(s1 == s2)
2438 dst &= lo2
2440 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
2441 tcg_gen_neg_tl(t1, t1);
2442 tcg_gen_or_tl(lo2, lo2, t1);
2443 tcg_gen_and_tl(dst, dst, lo2);
2445 tcg_temp_free(lo1);
2446 tcg_temp_free(lo2);
2447 tcg_temp_free(t1);
2448 tcg_temp_free(t2);
2451 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
2453 TCGv tmp = tcg_temp_new();
2455 tcg_gen_add_tl(tmp, s1, s2);
2456 tcg_gen_andi_tl(dst, tmp, -8);
2457 if (left) {
2458 tcg_gen_neg_tl(tmp, tmp);
2460 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
2462 tcg_temp_free(tmp);
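/* ALIGNADDRESS: dst = (s1 + s2) & ~7, with the low three bits of the
   sum (negated for the "little" variant) deposited into GSR.align for
   use by a subsequent FALIGNDATA.  */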
2465 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
2467 TCGv t1, t2, shift;
2469 t1 = tcg_temp_new();
2470 t2 = tcg_temp_new();
2471 shift = tcg_temp_new();
2473 tcg_gen_andi_tl(shift, gsr, 7);
2474 tcg_gen_shli_tl(shift, shift, 3);
2475 tcg_gen_shl_tl(t1, s1, shift);
2477 /* A shift of 64 is undefined in TCG and need not produce 0. Split this
2478 into a shift of up to 63 bits followed by a constant shift of 1. */
2479 tcg_gen_xori_tl(shift, shift, 63);
2480 tcg_gen_shr_tl(t2, s2, shift);
2481 tcg_gen_shri_tl(t2, t2, 1);
2483 tcg_gen_or_tl(dst, t1, t2);
2485 tcg_temp_free(t1);
2486 tcg_temp_free(t2);
2487 tcg_temp_free(shift);
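/* Worked example: with GSR.align == 3 the code above computes shift = 24,
   so dst = (s1 << 24) | (s2 >> 40) -- the aligned 8-byte window starting
   3 bytes into the concatenated s1:s2 pair.  */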
2489 #endif
2491 #define CHECK_IU_FEATURE(dc, FEATURE) \
2492 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2493 goto illegal_insn;
2494 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2495 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2496 goto nfpu_insn;
2498 /* before an instruction, dc->pc must be static */
2499 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
2501 unsigned int opc, rs1, rs2, rd;
2502 TCGv cpu_src1, cpu_src2;
2503 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
2504 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
2505 target_long simm;
2507 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2508 tcg_gen_debug_insn_start(dc->pc);
2511 opc = GET_FIELD(insn, 0, 1);
2512 rd = GET_FIELD(insn, 2, 6);
2514 switch (opc) {
2515 case 0: /* branches/sethi */
2517 unsigned int xop = GET_FIELD(insn, 7, 9);
2518 int32_t target;
2519 switch (xop) {
2520 #ifdef TARGET_SPARC64
2521 case 0x1: /* V9 BPcc */
2523 int cc;
2525 target = GET_FIELD_SP(insn, 0, 18);
2526 target = sign_extend(target, 19);
2527 target <<= 2;
2528 cc = GET_FIELD_SP(insn, 20, 21);
2529 if (cc == 0)
2530 do_branch(dc, target, insn, 0);
2531 else if (cc == 2)
2532 do_branch(dc, target, insn, 1);
2533 else
2534 goto illegal_insn;
2535 goto jmp_insn;
2537 case 0x3: /* V9 BPr */
2539 target = GET_FIELD_SP(insn, 0, 13) |
2540 (GET_FIELD_SP(insn, 20, 21) << 14);
2541 target = sign_extend(target, 16);
2542 target <<= 2;
2543 cpu_src1 = get_src1(dc, insn);
2544 do_branch_reg(dc, target, insn, cpu_src1);
2545 goto jmp_insn;
2547 case 0x5: /* V9 FBPcc */
2549 int cc = GET_FIELD_SP(insn, 20, 21);
2550 if (gen_trap_ifnofpu(dc)) {
2551 goto jmp_insn;
2553 target = GET_FIELD_SP(insn, 0, 18);
2554 target = sign_extend(target, 19);
2555 target <<= 2;
2556 do_fbranch(dc, target, insn, cc);
2557 goto jmp_insn;
2559 #else
2560 case 0x7: /* CBN+x */
2562 goto ncp_insn;
2564 #endif
2565 case 0x2: /* BN+x */
2567 target = GET_FIELD(insn, 10, 31);
2568 target = sign_extend(target, 22);
2569 target <<= 2;
2570 do_branch(dc, target, insn, 0);
2571 goto jmp_insn;
2573 case 0x6: /* FBN+x */
2575 if (gen_trap_ifnofpu(dc)) {
2576 goto jmp_insn;
2578 target = GET_FIELD(insn, 10, 31);
2579 target = sign_extend(target, 22);
2580 target <<= 2;
2581 do_fbranch(dc, target, insn, 0);
2582 goto jmp_insn;
2584 case 0x4: /* SETHI */
2585 /* Special-case %g0 because that's the canonical nop. */
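/* SETHI writes imm22 << 10 into rd, clearing the low ten bits; e.g.
   "sethi %hi(0x40000000), %g1" sets %g1 to 0x40000000.  */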
2586 if (rd) {
2587 uint32_t value = GET_FIELD(insn, 10, 31);
2588 TCGv t = gen_dest_gpr(dc, rd);
2589 tcg_gen_movi_tl(t, value << 10);
2590 gen_store_gpr(dc, rd, t);
2592 break;
2593 case 0x0: /* UNIMPL */
2594 default:
2595 goto illegal_insn;
2597 break;
2599 break;
2600 case 1: /* CALL */
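/* CALL saves its own address into %o7 (r15) and jumps to PC + (disp30 << 2);
   with 32-bit address masking in effect (the AM_CHECK below) the target is
   truncated to 32 bits.  */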
2602 target_long target = GET_FIELDs(insn, 2, 31) << 2;
2603 TCGv o7 = gen_dest_gpr(dc, 15);
2605 tcg_gen_movi_tl(o7, dc->pc);
2606 gen_store_gpr(dc, 15, o7);
2607 target += dc->pc;
2608 gen_mov_pc_npc(dc);
2609 #ifdef TARGET_SPARC64
2610 if (unlikely(AM_CHECK(dc))) {
2611 target &= 0xffffffffULL;
2613 #endif
2614 dc->npc = target;
2616 goto jmp_insn;
2617 case 2: /* FPU & Logical Operations */
2619 unsigned int xop = GET_FIELD(insn, 7, 12);
2620 TCGv cpu_dst = get_temp_tl(dc);
2621 TCGv cpu_tmp0;
2623 if (xop == 0x3a) { /* generate trap */
2624 int cond = GET_FIELD(insn, 3, 6);
2625 TCGv_i32 trap;
2626 int l1 = -1, mask;
2628 if (cond == 0) {
2629 /* Trap never. */
2630 break;
2633 save_state(dc);
2635 if (cond != 8) {
2636 /* Conditional trap. */
2637 DisasCompare cmp;
2638 #ifdef TARGET_SPARC64
2639 /* V9 icc/xcc */
2640 int cc = GET_FIELD_SP(insn, 11, 12);
2641 if (cc == 0) {
2642 gen_compare(&cmp, 0, cond, dc);
2643 } else if (cc == 2) {
2644 gen_compare(&cmp, 1, cond, dc);
2645 } else {
2646 goto illegal_insn;
2648 #else
2649 gen_compare(&cmp, 0, cond, dc);
2650 #endif
2651 l1 = gen_new_label();
2652 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
2653 cmp.c1, cmp.c2, l1);
2654 free_compare(&cmp);
2657 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2658 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
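/* Software trap vectors start at TT_TRAP (0x80); the 7-bit V8 mask
   (8 bits for UA2005 hyper-traps) matches the architected width of the
   Tcc trap-number operand.  */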
2660 /* Don't use the normal temporaries, as they may well have
2661 gone out of scope with the branch above. While we're
2662 doing that we might as well pre-truncate to 32-bit. */
2663 trap = tcg_temp_new_i32();
2665 rs1 = GET_FIELD_SP(insn, 14, 18);
2666 if (IS_IMM) {
2667 rs2 = GET_FIELD_SP(insn, 0, 6);
2668 if (rs1 == 0) {
2669 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
2670 /* Signal that the trap value is fully constant. */
2671 mask = 0;
2672 } else {
2673 TCGv t1 = gen_load_gpr(dc, rs1);
2674 tcg_gen_trunc_tl_i32(trap, t1);
2675 tcg_gen_addi_i32(trap, trap, rs2);
2677 } else {
2678 TCGv t1, t2;
2679 rs2 = GET_FIELD_SP(insn, 0, 4);
2680 t1 = gen_load_gpr(dc, rs1);
2681 t2 = gen_load_gpr(dc, rs2);
2682 tcg_gen_add_tl(t1, t1, t2);
2683 tcg_gen_trunc_tl_i32(trap, t1);
2685 if (mask != 0) {
2686 tcg_gen_andi_i32(trap, trap, mask);
2687 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2690 gen_helper_raise_exception(cpu_env, trap);
2691 tcg_temp_free_i32(trap);
2693 if (cond == 8) {
2694 /* An unconditional trap ends the TB. */
2695 dc->is_br = 1;
2696 goto jmp_insn;
2697 } else {
2698 /* A conditional trap falls through to the next insn. */
2699 gen_set_label(l1);
2700 break;
2702 } else if (xop == 0x28) {
2703 rs1 = GET_FIELD(insn, 13, 17);
2704 switch(rs1) {
2705 case 0: /* rdy */
2706 #ifndef TARGET_SPARC64
2707 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2708 manual, rdy on the microSPARC
2709 II */
2710 case 0x0f: /* stbar in the SPARCv8 manual,
2711 rdy on the microSPARC II */
2712 case 0x10 ... 0x1f: /* implementation-dependent in the
2713 SPARCv8 manual, rdy on the
2714 microSPARC II */
2715 /* Read Asr17 */
2716 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
2717 TCGv t = gen_dest_gpr(dc, rd);
2718 /* Read Asr17 for a single-processor Leon3 */
2719 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
2720 gen_store_gpr(dc, rd, t);
2721 break;
2723 #endif
2724 gen_store_gpr(dc, rd, cpu_y);
2725 break;
2726 #ifdef TARGET_SPARC64
2727 case 0x2: /* V9 rdccr */
2728 update_psr(dc);
2729 gen_helper_rdccr(cpu_dst, cpu_env);
2730 gen_store_gpr(dc, rd, cpu_dst);
2731 break;
2732 case 0x3: /* V9 rdasi */
2733 tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
2734 gen_store_gpr(dc, rd, cpu_dst);
2735 break;
2736 case 0x4: /* V9 rdtick */
2738 TCGv_ptr r_tickptr;
2740 r_tickptr = tcg_temp_new_ptr();
2741 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2742 offsetof(CPUSPARCState, tick));
2743 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2744 tcg_temp_free_ptr(r_tickptr);
2745 gen_store_gpr(dc, rd, cpu_dst);
2747 break;
2748 case 0x5: /* V9 rdpc */
2750 TCGv t = gen_dest_gpr(dc, rd);
2751 if (unlikely(AM_CHECK(dc))) {
2752 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
2753 } else {
2754 tcg_gen_movi_tl(t, dc->pc);
2756 gen_store_gpr(dc, rd, t);
2758 break;
2759 case 0x6: /* V9 rdfprs */
2760 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
2761 gen_store_gpr(dc, rd, cpu_dst);
2762 break;
2763 case 0xf: /* V9 membar */
2764 break; /* no effect */
2765 case 0x13: /* Graphics Status */
2766 if (gen_trap_ifnofpu(dc)) {
2767 goto jmp_insn;
2769 gen_store_gpr(dc, rd, cpu_gsr);
2770 break;
2771 case 0x16: /* Softint */
2772 tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
2773 gen_store_gpr(dc, rd, cpu_dst);
2774 break;
2775 case 0x17: /* Tick compare */
2776 gen_store_gpr(dc, rd, cpu_tick_cmpr);
2777 break;
2778 case 0x18: /* System tick */
2780 TCGv_ptr r_tickptr;
2782 r_tickptr = tcg_temp_new_ptr();
2783 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2784 offsetof(CPUSPARCState, stick));
2785 gen_helper_tick_get_count(cpu_dst, r_tickptr);
2786 tcg_temp_free_ptr(r_tickptr);
2787 gen_store_gpr(dc, rd, cpu_dst);
2789 break;
2790 case 0x19: /* System tick compare */
2791 gen_store_gpr(dc, rd, cpu_stick_cmpr);
2792 break;
2793 case 0x10: /* Performance Control */
2794 case 0x11: /* Performance Instrumentation Counter */
2795 case 0x12: /* Dispatch Control */
2796 case 0x14: /* Softint set, WO */
2797 case 0x15: /* Softint clear, WO */
2798 #endif
2799 default:
2800 goto illegal_insn;
2802 #if !defined(CONFIG_USER_ONLY)
2803 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
2804 #ifndef TARGET_SPARC64
2805 if (!supervisor(dc)) {
2806 goto priv_insn;
2808 update_psr(dc);
2809 gen_helper_rdpsr(cpu_dst, cpu_env);
2810 #else
2811 CHECK_IU_FEATURE(dc, HYPV);
2812 if (!hypervisor(dc))
2813 goto priv_insn;
2814 rs1 = GET_FIELD(insn, 13, 17);
2815 switch (rs1) {
2816 case 0: // hpstate
2817 // gen_op_rdhpstate();
2818 break;
2819 case 1: // htstate
2820 // gen_op_rdhtstate();
2821 break;
2822 case 3: // hintp
2823 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
2824 break;
2825 case 5: // htba
2826 tcg_gen_mov_tl(cpu_dst, cpu_htba);
2827 break;
2828 case 6: // hver
2829 tcg_gen_mov_tl(cpu_dst, cpu_hver);
2830 break;
2831 case 31: // hstick_cmpr
2832 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
2833 break;
2834 default:
2835 goto illegal_insn;
2837 #endif
2838 gen_store_gpr(dc, rd, cpu_dst);
2839 break;
2840 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
2841 if (!supervisor(dc)) {
2842 goto priv_insn;
2844 cpu_tmp0 = get_temp_tl(dc);
2845 #ifdef TARGET_SPARC64
2846 rs1 = GET_FIELD(insn, 13, 17);
2847 switch (rs1) {
2848 case 0: // tpc
2850 TCGv_ptr r_tsptr;
2852 r_tsptr = tcg_temp_new_ptr();
2853 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2854 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2855 offsetof(trap_state, tpc));
2856 tcg_temp_free_ptr(r_tsptr);
2858 break;
2859 case 1: // tnpc
2861 TCGv_ptr r_tsptr;
2863 r_tsptr = tcg_temp_new_ptr();
2864 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2865 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2866 offsetof(trap_state, tnpc));
2867 tcg_temp_free_ptr(r_tsptr);
2869 break;
2870 case 2: // tstate
2872 TCGv_ptr r_tsptr;
2874 r_tsptr = tcg_temp_new_ptr();
2875 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2876 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
2877 offsetof(trap_state, tstate));
2878 tcg_temp_free_ptr(r_tsptr);
2880 break;
2881 case 3: // tt
2883 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2885 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
2886 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
2887 offsetof(trap_state, tt));
2888 tcg_temp_free_ptr(r_tsptr);
2890 break;
2891 case 4: // tick
2893 TCGv_ptr r_tickptr;
2895 r_tickptr = tcg_temp_new_ptr();
2896 tcg_gen_ld_ptr(r_tickptr, cpu_env,
2897 offsetof(CPUSPARCState, tick));
2898 gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
2899 tcg_temp_free_ptr(r_tickptr);
2901 break;
2902 case 5: // tba
2903 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
2904 break;
2905 case 6: // pstate
2906 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2907 offsetof(CPUSPARCState, pstate));
2908 break;
2909 case 7: // tl
2910 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2911 offsetof(CPUSPARCState, tl));
2912 break;
2913 case 8: // pil
2914 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2915 offsetof(CPUSPARCState, psrpil));
2916 break;
2917 case 9: // cwp
2918 gen_helper_rdcwp(cpu_tmp0, cpu_env);
2919 break;
2920 case 10: // cansave
2921 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2922 offsetof(CPUSPARCState, cansave));
2923 break;
2924 case 11: // canrestore
2925 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2926 offsetof(CPUSPARCState, canrestore));
2927 break;
2928 case 12: // cleanwin
2929 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2930 offsetof(CPUSPARCState, cleanwin));
2931 break;
2932 case 13: // otherwin
2933 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2934 offsetof(CPUSPARCState, otherwin));
2935 break;
2936 case 14: // wstate
2937 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2938 offsetof(CPUSPARCState, wstate));
2939 break;
2940 case 16: // UA2005 gl
2941 CHECK_IU_FEATURE(dc, GL);
2942 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
2943 offsetof(CPUSPARCState, gl));
2944 break;
2945 case 26: // UA2005 strand status
2946 CHECK_IU_FEATURE(dc, HYPV);
2947 if (!hypervisor(dc))
2948 goto priv_insn;
2949 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
2950 break;
2951 case 31: // ver
2952 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
2953 break;
2954 case 15: // fq
2955 default:
2956 goto illegal_insn;
2958 #else
2959 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
2960 #endif
2961 gen_store_gpr(dc, rd, cpu_tmp0);
2962 break;
2963 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
2964 #ifdef TARGET_SPARC64
2965 save_state(dc);
2966 gen_helper_flushw(cpu_env);
2967 #else
2968 if (!supervisor(dc))
2969 goto priv_insn;
2970 gen_store_gpr(dc, rd, cpu_tbr);
2971 #endif
2972 break;
2973 #endif
2974 } else if (xop == 0x34) { /* FPU Operations (FPop1) */
2975 if (gen_trap_ifnofpu(dc)) {
2976 goto jmp_insn;
2978 gen_op_clear_ieee_excp_and_FTT();
2979 rs1 = GET_FIELD(insn, 13, 17);
2980 rs2 = GET_FIELD(insn, 27, 31);
2981 xop = GET_FIELD(insn, 18, 26);
2982 save_state(dc);
2983 switch (xop) {
2984 case 0x1: /* fmovs */
2985 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
2986 gen_store_fpr_F(dc, rd, cpu_src1_32);
2987 break;
2988 case 0x5: /* fnegs */
2989 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
2990 break;
2991 case 0x9: /* fabss */
2992 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
2993 break;
2994 case 0x29: /* fsqrts */
2995 CHECK_FPU_FEATURE(dc, FSQRT);
2996 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
2997 break;
2998 case 0x2a: /* fsqrtd */
2999 CHECK_FPU_FEATURE(dc, FSQRT);
3000 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3001 break;
3002 case 0x2b: /* fsqrtq */
3003 CHECK_FPU_FEATURE(dc, FLOAT128);
3004 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3005 break;
3006 case 0x41: /* fadds */
3007 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3008 break;
3009 case 0x42: /* faddd */
3010 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3011 break;
3012 case 0x43: /* faddq */
3013 CHECK_FPU_FEATURE(dc, FLOAT128);
3014 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3015 break;
3016 case 0x45: /* fsubs */
3017 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3018 break;
3019 case 0x46: /* fsubd */
3020 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3021 break;
3022 case 0x47: /* fsubq */
3023 CHECK_FPU_FEATURE(dc, FLOAT128);
3024 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3025 break;
3026 case 0x49: /* fmuls */
3027 CHECK_FPU_FEATURE(dc, FMUL);
3028 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3029 break;
3030 case 0x4a: /* fmuld */
3031 CHECK_FPU_FEATURE(dc, FMUL);
3032 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3033 break;
3034 case 0x4b: /* fmulq */
3035 CHECK_FPU_FEATURE(dc, FLOAT128);
3036 CHECK_FPU_FEATURE(dc, FMUL);
3037 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3038 break;
3039 case 0x4d: /* fdivs */
3040 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3041 break;
3042 case 0x4e: /* fdivd */
3043 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3044 break;
3045 case 0x4f: /* fdivq */
3046 CHECK_FPU_FEATURE(dc, FLOAT128);
3047 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3048 break;
3049 case 0x69: /* fsmuld */
3050 CHECK_FPU_FEATURE(dc, FSMULD);
3051 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3052 break;
3053 case 0x6e: /* fdmulq */
3054 CHECK_FPU_FEATURE(dc, FLOAT128);
3055 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3056 break;
3057 case 0xc4: /* fitos */
3058 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3059 break;
3060 case 0xc6: /* fdtos */
3061 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3062 break;
3063 case 0xc7: /* fqtos */
3064 CHECK_FPU_FEATURE(dc, FLOAT128);
3065 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3066 break;
3067 case 0xc8: /* fitod */
3068 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3069 break;
3070 case 0xc9: /* fstod */
3071 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3072 break;
3073 case 0xcb: /* fqtod */
3074 CHECK_FPU_FEATURE(dc, FLOAT128);
3075 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3076 break;
3077 case 0xcc: /* fitoq */
3078 CHECK_FPU_FEATURE(dc, FLOAT128);
3079 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3080 break;
3081 case 0xcd: /* fstoq */
3082 CHECK_FPU_FEATURE(dc, FLOAT128);
3083 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3084 break;
3085 case 0xce: /* fdtoq */
3086 CHECK_FPU_FEATURE(dc, FLOAT128);
3087 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3088 break;
3089 case 0xd1: /* fstoi */
3090 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3091 break;
3092 case 0xd2: /* fdtoi */
3093 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3094 break;
3095 case 0xd3: /* fqtoi */
3096 CHECK_FPU_FEATURE(dc, FLOAT128);
3097 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3098 break;
3099 #ifdef TARGET_SPARC64
3100 case 0x2: /* V9 fmovd */
3101 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3102 gen_store_fpr_D(dc, rd, cpu_src1_64);
3103 break;
3104 case 0x3: /* V9 fmovq */
3105 CHECK_FPU_FEATURE(dc, FLOAT128);
3106 gen_move_Q(rd, rs2);
3107 break;
3108 case 0x6: /* V9 fnegd */
3109 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3110 break;
3111 case 0x7: /* V9 fnegq */
3112 CHECK_FPU_FEATURE(dc, FLOAT128);
3113 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3114 break;
3115 case 0xa: /* V9 fabsd */
3116 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3117 break;
3118 case 0xb: /* V9 fabsq */
3119 CHECK_FPU_FEATURE(dc, FLOAT128);
3120 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3121 break;
3122 case 0x81: /* V9 fstox */
3123 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3124 break;
3125 case 0x82: /* V9 fdtox */
3126 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3127 break;
3128 case 0x83: /* V9 fqtox */
3129 CHECK_FPU_FEATURE(dc, FLOAT128);
3130 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3131 break;
3132 case 0x84: /* V9 fxtos */
3133 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3134 break;
3135 case 0x88: /* V9 fxtod */
3136 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3137 break;
3138 case 0x8c: /* V9 fxtoq */
3139 CHECK_FPU_FEATURE(dc, FLOAT128);
3140 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3141 break;
3142 #endif
3143 default:
3144 goto illegal_insn;
3146 } else if (xop == 0x35) { /* FPU Operations (FPop2: compares & conditional moves) */
3147 #ifdef TARGET_SPARC64
3148 int cond;
3149 #endif
3150 if (gen_trap_ifnofpu(dc)) {
3151 goto jmp_insn;
3153 gen_op_clear_ieee_excp_and_FTT();
3154 rs1 = GET_FIELD(insn, 13, 17);
3155 rs2 = GET_FIELD(insn, 27, 31);
3156 xop = GET_FIELD(insn, 18, 26);
3157 save_state(dc);
3159 #ifdef TARGET_SPARC64
3160 #define FMOVR(sz) \
3161 do { \
3162 DisasCompare cmp; \
3163 cond = GET_FIELD_SP(insn, 10, 12); \
3164 cpu_src1 = get_src1(dc, insn); \
3165 gen_compare_reg(&cmp, cond, cpu_src1); \
3166 gen_fmov##sz(dc, &cmp, rd, rs2); \
3167 free_compare(&cmp); \
3168 } while (0)
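/* FMOVR (fmovr{s,d,q}): conditionally move an FP register when the
   *integer* register rs1 satisfies a register condition (e.g. fmovrz,
   fmovrgez), using the same condition encoding as BPr above.  */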
3170 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3171 FMOVR(s);
3172 break;
3173 } else if ((xop & 0x11f) == 0x006) { /* V9 fmovdr */
3174 FMOVR(d);
3175 break;
3176 } else if ((xop & 0x11f) == 0x007) { /* V9 fmovqr */
3177 CHECK_FPU_FEATURE(dc, FLOAT128);
3178 FMOVR(q);
3179 break;
3181 #undef FMOVR
3182 #endif
3183 switch (xop) {
3184 #ifdef TARGET_SPARC64
3185 #define FMOVCC(fcc, sz) \
3186 do { \
3187 DisasCompare cmp; \
3188 cond = GET_FIELD_SP(insn, 14, 17); \
3189 gen_fcompare(&cmp, fcc, cond); \
3190 gen_fmov##sz(dc, &cmp, rd, rs2); \
3191 free_compare(&cmp); \
3192 } while (0)
3194 case 0x001: /* V9 fmovscc %fcc0 */
3195 FMOVCC(0, s);
3196 break;
3197 case 0x002: /* V9 fmovdcc %fcc0 */
3198 FMOVCC(0, d);
3199 break;
3200 case 0x003: /* V9 fmovqcc %fcc0 */
3201 CHECK_FPU_FEATURE(dc, FLOAT128);
3202 FMOVCC(0, q);
3203 break;
3204 case 0x041: /* V9 fmovscc %fcc1 */
3205 FMOVCC(1, s);
3206 break;
3207 case 0x042: /* V9 fmovdcc %fcc1 */
3208 FMOVCC(1, d);
3209 break;
3210 case 0x043: /* V9 fmovqcc %fcc1 */
3211 CHECK_FPU_FEATURE(dc, FLOAT128);
3212 FMOVCC(1, q);
3213 break;
3214 case 0x081: /* V9 fmovscc %fcc2 */
3215 FMOVCC(2, s);
3216 break;
3217 case 0x082: /* V9 fmovdcc %fcc2 */
3218 FMOVCC(2, d);
3219 break;
3220 case 0x083: /* V9 fmovqcc %fcc2 */
3221 CHECK_FPU_FEATURE(dc, FLOAT128);
3222 FMOVCC(2, q);
3223 break;
3224 case 0x0c1: /* V9 fmovscc %fcc3 */
3225 FMOVCC(3, s);
3226 break;
3227 case 0x0c2: /* V9 fmovdcc %fcc3 */
3228 FMOVCC(3, d);
3229 break;
3230 case 0x0c3: /* V9 fmovqcc %fcc3 */
3231 CHECK_FPU_FEATURE(dc, FLOAT128);
3232 FMOVCC(3, q);
3233 break;
3234 #undef FMOVCC
3235 #define FMOVCC(xcc, sz) \
3236 do { \
3237 DisasCompare cmp; \
3238 cond = GET_FIELD_SP(insn, 14, 17); \
3239 gen_compare(&cmp, xcc, cond, dc); \
3240 gen_fmov##sz(dc, &cmp, rd, rs2); \
3241 free_compare(&cmp); \
3242 } while (0)
3244 case 0x101: /* V9 fmovscc %icc */
3245 FMOVCC(0, s);
3246 break;
3247 case 0x102: /* V9 fmovdcc %icc */
3248 FMOVCC(0, d);
3249 break;
3250 case 0x103: /* V9 fmovqcc %icc */
3251 CHECK_FPU_FEATURE(dc, FLOAT128);
3252 FMOVCC(0, q);
3253 break;
3254 case 0x181: /* V9 fmovscc %xcc */
3255 FMOVCC(1, s);
3256 break;
3257 case 0x182: /* V9 fmovdcc %xcc */
3258 FMOVCC(1, d);
3259 break;
3260 case 0x183: /* V9 fmovqcc %xcc */
3261 CHECK_FPU_FEATURE(dc, FLOAT128);
3262 FMOVCC(1, q);
3263 break;
3264 #undef FMOVCC
3265 #endif
3266 case 0x51: /* fcmps, V9 %fcc */
3267 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3268 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3269 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3270 break;
3271 case 0x52: /* fcmpd, V9 %fcc */
3272 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3273 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3274 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3275 break;
3276 case 0x53: /* fcmpq, V9 %fcc */
3277 CHECK_FPU_FEATURE(dc, FLOAT128);
3278 gen_op_load_fpr_QT0(QFPREG(rs1));
3279 gen_op_load_fpr_QT1(QFPREG(rs2));
3280 gen_op_fcmpq(rd & 3);
3281 break;
3282 case 0x55: /* fcmpes, V9 %fcc */
3283 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3284 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3285 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3286 break;
3287 case 0x56: /* fcmped, V9 %fcc */
3288 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3289 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3290 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3291 break;
3292 case 0x57: /* fcmpeq, V9 %fcc */
3293 CHECK_FPU_FEATURE(dc, FLOAT128);
3294 gen_op_load_fpr_QT0(QFPREG(rs1));
3295 gen_op_load_fpr_QT1(QFPREG(rs2));
3296 gen_op_fcmpeq(rd & 3);
3297 break;
3298 default:
3299 goto illegal_insn;
3301 } else if (xop == 0x2) {
3302 TCGv dst = gen_dest_gpr(dc, rd);
3303 rs1 = GET_FIELD(insn, 13, 17);
3304 if (rs1 == 0) {
3305 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3306 if (IS_IMM) { /* immediate */
3307 simm = GET_FIELDs(insn, 19, 31);
3308 tcg_gen_movi_tl(dst, simm);
3309 gen_store_gpr(dc, rd, dst);
3310 } else { /* register */
3311 rs2 = GET_FIELD(insn, 27, 31);
3312 if (rs2 == 0) {
3313 tcg_gen_movi_tl(dst, 0);
3314 gen_store_gpr(dc, rd, dst);
3315 } else {
3316 cpu_src2 = gen_load_gpr(dc, rs2);
3317 gen_store_gpr(dc, rd, cpu_src2);
3320 } else {
3321 cpu_src1 = get_src1(dc, insn);
3322 if (IS_IMM) { /* immediate */
3323 simm = GET_FIELDs(insn, 19, 31);
3324 tcg_gen_ori_tl(dst, cpu_src1, simm);
3325 gen_store_gpr(dc, rd, dst);
3326 } else { /* register */
3327 rs2 = GET_FIELD(insn, 27, 31);
3328 if (rs2 == 0) {
3329 /* mov shortcut: or x, %g0, y -> mov x, y */
3330 gen_store_gpr(dc, rd, cpu_src1);
3331 } else {
3332 cpu_src2 = gen_load_gpr(dc, rs2);
3333 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3334 gen_store_gpr(dc, rd, dst);
3338 #ifdef TARGET_SPARC64
3339 } else if (xop == 0x25) { /* sll, V9 sllx */
3340 cpu_src1 = get_src1(dc, insn);
3341 if (IS_IMM) { /* immediate */
3342 simm = GET_FIELDs(insn, 20, 31);
3343 if (insn & (1 << 12)) {
3344 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3345 } else {
3346 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3348 } else { /* register */
3349 rs2 = GET_FIELD(insn, 27, 31);
3350 cpu_src2 = gen_load_gpr(dc, rs2);
3351 cpu_tmp0 = get_temp_tl(dc);
3352 if (insn & (1 << 12)) {
3353 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3354 } else {
3355 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3357 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3359 gen_store_gpr(dc, rd, cpu_dst);
3360 } else if (xop == 0x26) { /* srl, V9 srlx */
3361 cpu_src1 = get_src1(dc, insn);
3362 if (IS_IMM) { /* immediate */
3363 simm = GET_FIELDs(insn, 20, 31);
3364 if (insn & (1 << 12)) {
3365 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3366 } else {
3367 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3368 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3370 } else { /* register */
3371 rs2 = GET_FIELD(insn, 27, 31);
3372 cpu_src2 = gen_load_gpr(dc, rs2);
3373 cpu_tmp0 = get_temp_tl(dc);
3374 if (insn & (1 << 12)) {
3375 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3376 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3377 } else {
3378 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3379 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3380 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3383 gen_store_gpr(dc, rd, cpu_dst);
3384 } else if (xop == 0x27) { /* sra, V9 srax */
3385 cpu_src1 = get_src1(dc, insn);
3386 if (IS_IMM) { /* immediate */
3387 simm = GET_FIELDs(insn, 20, 31);
3388 if (insn & (1 << 12)) {
3389 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3390 } else {
3391 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3392 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3394 } else { /* register */
3395 rs2 = GET_FIELD(insn, 27, 31);
3396 cpu_src2 = gen_load_gpr(dc, rs2);
3397 cpu_tmp0 = get_temp_tl(dc);
3398 if (insn & (1 << 12)) {
3399 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3400 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3401 } else {
3402 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3403 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3404 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3407 gen_store_gpr(dc, rd, cpu_dst);
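/* For all three shifts, bit 12 of the instruction selects the 64-bit
   "x" form with a 6-bit count; the plain V8 forms keep only 5 count
   bits and operate on the low 32 bits of the source, zero-extending
   for srl and sign-extending for sra.  */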
3408 #endif
3409 } else if (xop < 0x36) {
3410 if (xop < 0x20) {
3411 cpu_src1 = get_src1(dc, insn);
3412 cpu_src2 = get_src2(dc, insn);
3413 switch (xop & ~0x10) {
3414 case 0x0: /* add */
3415 if (xop & 0x10) {
3416 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3417 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3418 dc->cc_op = CC_OP_ADD;
3419 } else {
3420 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3422 break;
3423 case 0x1: /* and */
3424 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3425 if (xop & 0x10) {
3426 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3427 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3428 dc->cc_op = CC_OP_LOGIC;
3430 break;
3431 case 0x2: /* or */
3432 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3433 if (xop & 0x10) {
3434 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3435 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3436 dc->cc_op = CC_OP_LOGIC;
3438 break;
3439 case 0x3: /* xor */
3440 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3441 if (xop & 0x10) {
3442 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3443 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3444 dc->cc_op = CC_OP_LOGIC;
3446 break;
3447 case 0x4: /* sub */
3448 if (xop & 0x10) {
3449 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3450 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3451 dc->cc_op = CC_OP_SUB;
3452 } else {
3453 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3455 break;
3456 case 0x5: /* andn */
3457 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3458 if (xop & 0x10) {
3459 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3460 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3461 dc->cc_op = CC_OP_LOGIC;
3463 break;
3464 case 0x6: /* orn */
3465 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3466 if (xop & 0x10) {
3467 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3468 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3469 dc->cc_op = CC_OP_LOGIC;
3471 break;
3472 case 0x7: /* xorn */
3473 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3474 if (xop & 0x10) {
3475 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3476 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3477 dc->cc_op = CC_OP_LOGIC;
3479 break;
3480 case 0x8: /* addx, V9 addc */
3481 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3482 (xop & 0x10));
3483 break;
3484 #ifdef TARGET_SPARC64
3485 case 0x9: /* V9 mulx */
3486 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3487 break;
3488 #endif
3489 case 0xa: /* umul */
3490 CHECK_IU_FEATURE(dc, MUL);
3491 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3492 if (xop & 0x10) {
3493 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3494 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3495 dc->cc_op = CC_OP_LOGIC;
3497 break;
3498 case 0xb: /* smul */
3499 CHECK_IU_FEATURE(dc, MUL);
3500 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3501 if (xop & 0x10) {
3502 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3503 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3504 dc->cc_op = CC_OP_LOGIC;
3506 break;
3507 case 0xc: /* subx, V9 subc */
3508 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3509 (xop & 0x10));
3510 break;
3511 #ifdef TARGET_SPARC64
3512 case 0xd: /* V9 udivx */
3513 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3514 break;
3515 #endif
3516 case 0xe: /* udiv */
3517 CHECK_IU_FEATURE(dc, DIV);
3518 if (xop & 0x10) {
3519 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3520 cpu_src2);
3521 dc->cc_op = CC_OP_DIV;
3522 } else {
3523 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3524 cpu_src2);
3526 break;
3527 case 0xf: /* sdiv */
3528 CHECK_IU_FEATURE(dc, DIV);
3529 if (xop & 0x10) {
3530 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3531 cpu_src2);
3532 dc->cc_op = CC_OP_DIV;
3533 } else {
3534 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3535 cpu_src2);
3537 break;
3538 default:
3539 goto illegal_insn;
3541 gen_store_gpr(dc, rd, cpu_dst);
3542 } else {
3543 cpu_src1 = get_src1(dc, insn);
3544 cpu_src2 = get_src2(dc, insn);
3545 switch (xop) {
3546 case 0x20: /* taddcc */
3547 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3548 gen_store_gpr(dc, rd, cpu_dst);
3549 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3550 dc->cc_op = CC_OP_TADD;
3551 break;
3552 case 0x21: /* tsubcc */
3553 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3554 gen_store_gpr(dc, rd, cpu_dst);
3555 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3556 dc->cc_op = CC_OP_TSUB;
3557 break;
3558 case 0x22: /* taddcctv */
3559 gen_helper_taddcctv(cpu_dst, cpu_env,
3560 cpu_src1, cpu_src2);
3561 gen_store_gpr(dc, rd, cpu_dst);
3562 dc->cc_op = CC_OP_TADDTV;
3563 break;
3564 case 0x23: /* tsubcctv */
3565 gen_helper_tsubcctv(cpu_dst, cpu_env,
3566 cpu_src1, cpu_src2);
3567 gen_store_gpr(dc, rd, cpu_dst);
3568 dc->cc_op = CC_OP_TSUBTV;
3569 break;
3570 case 0x24: /* mulscc */
3571 update_psr(dc);
3572 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3573 gen_store_gpr(dc, rd, cpu_dst);
3574 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3575 dc->cc_op = CC_OP_ADD;
3576 break;
3577 #ifndef TARGET_SPARC64
3578 case 0x25: /* sll */
3579 if (IS_IMM) { /* immediate */
3580 simm = GET_FIELDs(insn, 20, 31);
3581 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3582 } else { /* register */
3583 cpu_tmp0 = get_temp_tl(dc);
3584 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3585 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3587 gen_store_gpr(dc, rd, cpu_dst);
3588 break;
3589 case 0x26: /* srl */
3590 if (IS_IMM) { /* immediate */
3591 simm = GET_FIELDs(insn, 20, 31);
3592 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3593 } else { /* register */
3594 cpu_tmp0 = get_temp_tl(dc);
3595 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3596 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3598 gen_store_gpr(dc, rd, cpu_dst);
3599 break;
3600 case 0x27: /* sra */
3601 if (IS_IMM) { /* immediate */
3602 simm = GET_FIELDs(insn, 20, 31);
3603 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3604 } else { /* register */
3605 cpu_tmp0 = get_temp_tl(dc);
3606 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3607 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3609 gen_store_gpr(dc, rd, cpu_dst);
3610 break;
3611 #endif
3612 case 0x30:
3614 cpu_tmp0 = get_temp_tl(dc);
3615 switch(rd) {
3616 case 0: /* wry */
3617 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3618 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
3619 break;
3620 #ifndef TARGET_SPARC64
3621 case 0x01 ... 0x0f: /* undefined in the
3622 SPARCv8 manual, nop
3623 on the microSPARC
3624 II */
3625 case 0x10 ... 0x1f: /* implementation-dependent
3626 in the SPARCv8
3627 manual, nop on the
3628 microSPARC II */
3629 if ((rd == 0x13) && (dc->def->features &
3630 CPU_FEATURE_POWERDOWN)) {
3631 /* LEON3 power-down */
3632 save_state(dc);
3633 gen_helper_power_down(cpu_env);
3635 break;
3636 #else
3637 case 0x2: /* V9 wrccr */
3638 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3639 gen_helper_wrccr(cpu_env, cpu_tmp0);
3640 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3641 dc->cc_op = CC_OP_FLAGS;
3642 break;
3643 case 0x3: /* V9 wrasi */
3644 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3645 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
3646 tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
3647 break;
3648 case 0x6: /* V9 wrfprs */
3649 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3650 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
3651 save_state(dc);
3652 gen_op_next_insn();
3653 tcg_gen_exit_tb(0);
3654 dc->is_br = 1;
3655 break;
3656 case 0xf: /* V9 sir, nop if user */
3657 #if !defined(CONFIG_USER_ONLY)
3658 if (supervisor(dc)) {
3659 ; // XXX
3661 #endif
3662 break;
3663 case 0x13: /* Graphics Status */
3664 if (gen_trap_ifnofpu(dc)) {
3665 goto jmp_insn;
3667 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3668 break;
3669 case 0x14: /* Softint set */
3670 if (!supervisor(dc))
3671 goto illegal_insn;
3672 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3673 gen_helper_set_softint(cpu_env, cpu_tmp0);
3674 break;
3675 case 0x15: /* Softint clear */
3676 if (!supervisor(dc))
3677 goto illegal_insn;
3678 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3679 gen_helper_clear_softint(cpu_env, cpu_tmp0);
3680 break;
3681 case 0x16: /* Softint write */
3682 if (!supervisor(dc))
3683 goto illegal_insn;
3684 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3685 gen_helper_write_softint(cpu_env, cpu_tmp0);
3686 break;
3687 case 0x17: /* Tick compare */
3688 #if !defined(CONFIG_USER_ONLY)
3689 if (!supervisor(dc))
3690 goto illegal_insn;
3691 #endif
3693 TCGv_ptr r_tickptr;
3695 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3696 cpu_src2);
3697 r_tickptr = tcg_temp_new_ptr();
3698 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3699 offsetof(CPUSPARCState, tick));
3700 gen_helper_tick_set_limit(r_tickptr,
3701 cpu_tick_cmpr);
3702 tcg_temp_free_ptr(r_tickptr);
3704 break;
3705 case 0x18: /* System tick */
3706 #if !defined(CONFIG_USER_ONLY)
3707 if (!supervisor(dc))
3708 goto illegal_insn;
3709 #endif
3711 TCGv_ptr r_tickptr;
3713 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
3714 cpu_src2);
3715 r_tickptr = tcg_temp_new_ptr();
3716 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3717 offsetof(CPUSPARCState, stick));
3718 gen_helper_tick_set_count(r_tickptr,
3719 cpu_tmp0);
3720 tcg_temp_free_ptr(r_tickptr);
3722 break;
3723 case 0x19: /* System tick compare */
3724 #if !defined(CONFIG_USER_ONLY)
3725 if (!supervisor(dc))
3726 goto illegal_insn;
3727 #endif
3729 TCGv_ptr r_tickptr;
3731 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3732 cpu_src2);
3733 r_tickptr = tcg_temp_new_ptr();
3734 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3735 offsetof(CPUSPARCState, stick));
3736 gen_helper_tick_set_limit(r_tickptr,
3737 cpu_stick_cmpr);
3738 tcg_temp_free_ptr(r_tickptr);
3740 break;
3742 case 0x10: /* Performance Control */
3743 case 0x11: /* Performance Instrumentation
3744 Counter */
3745 case 0x12: /* Dispatch Control */
3746 #endif
3747 default:
3748 goto illegal_insn;
3751 break;
3752 #if !defined(CONFIG_USER_ONLY)
3753 case 0x31: /* wrpsr, V9 saved, restored */
3755 if (!supervisor(dc))
3756 goto priv_insn;
3757 #ifdef TARGET_SPARC64
3758 switch (rd) {
3759 case 0:
3760 gen_helper_saved(cpu_env);
3761 break;
3762 case 1:
3763 gen_helper_restored(cpu_env);
3764 break;
3765 case 2: /* UA2005 allclean */
3766 case 3: /* UA2005 otherw */
3767 case 4: /* UA2005 normalw */
3768 case 5: /* UA2005 invalw */
3769 // XXX
3770 default:
3771 goto illegal_insn;
3773 #else
3774 cpu_tmp0 = get_temp_tl(dc);
3775 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3776 gen_helper_wrpsr(cpu_env, cpu_tmp0);
3777 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3778 dc->cc_op = CC_OP_FLAGS;
3779 save_state(dc);
3780 gen_op_next_insn();
3781 tcg_gen_exit_tb(0);
3782 dc->is_br = 1;
3783 #endif
3785 break;
3786 case 0x32: /* wrwim, V9 wrpr */
3788 if (!supervisor(dc))
3789 goto priv_insn;
3790 cpu_tmp0 = get_temp_tl(dc);
3791 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3792 #ifdef TARGET_SPARC64
3793 switch (rd) {
3794 case 0: // tpc
3796 TCGv_ptr r_tsptr;
3798 r_tsptr = tcg_temp_new_ptr();
3799 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3800 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3801 offsetof(trap_state, tpc));
3802 tcg_temp_free_ptr(r_tsptr);
3804 break;
3805 case 1: // tnpc
3807 TCGv_ptr r_tsptr;
3809 r_tsptr = tcg_temp_new_ptr();
3810 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3811 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3812 offsetof(trap_state, tnpc));
3813 tcg_temp_free_ptr(r_tsptr);
3815 break;
3816 case 2: // tstate
3818 TCGv_ptr r_tsptr;
3820 r_tsptr = tcg_temp_new_ptr();
3821 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3822 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3823 offsetof(trap_state,
3824 tstate));
3825 tcg_temp_free_ptr(r_tsptr);
3827 break;
3828 case 3: // tt
3830 TCGv_ptr r_tsptr;
3832 r_tsptr = tcg_temp_new_ptr();
3833 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3834 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3835 offsetof(trap_state, tt));
3836 tcg_temp_free_ptr(r_tsptr);
3838 break;
3839 case 4: // tick
3841 TCGv_ptr r_tickptr;
3843 r_tickptr = tcg_temp_new_ptr();
3844 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3845 offsetof(CPUSPARCState, tick));
3846 gen_helper_tick_set_count(r_tickptr,
3847 cpu_tmp0);
3848 tcg_temp_free_ptr(r_tickptr);
3850 break;
3851 case 5: // tba
3852 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3853 break;
3854 case 6: // pstate
3855 save_state(dc);
3856 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3857 dc->npc = DYNAMIC_PC;
3858 break;
3859 case 7: // tl
3860 save_state(dc);
3861 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3862 offsetof(CPUSPARCState, tl));
3863 dc->npc = DYNAMIC_PC;
3864 break;
3865 case 8: // pil
3866 gen_helper_wrpil(cpu_env, cpu_tmp0);
3867 break;
3868 case 9: // cwp
3869 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3870 break;
3871 case 10: // cansave
3872 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3873 offsetof(CPUSPARCState,
3874 cansave));
3875 break;
3876 case 11: // canrestore
3877 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3878 offsetof(CPUSPARCState,
3879 canrestore));
3880 break;
3881 case 12: // cleanwin
3882 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3883 offsetof(CPUSPARCState,
3884 cleanwin));
3885 break;
3886 case 13: // otherwin
3887 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3888 offsetof(CPUSPARCState,
3889 otherwin));
3890 break;
3891 case 14: // wstate
3892 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3893 offsetof(CPUSPARCState,
3894 wstate));
3895 break;
3896 case 16: // UA2005 gl
3897 CHECK_IU_FEATURE(dc, GL);
3898 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3899 offsetof(CPUSPARCState, gl));
3900 break;
3901 case 26: // UA2005 strand status
3902 CHECK_IU_FEATURE(dc, HYPV);
3903 if (!hypervisor(dc))
3904 goto priv_insn;
3905 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3906 break;
3907 default:
3908 goto illegal_insn;
3910 #else
3911 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3912 if (dc->def->nwindows != 32) {
3913 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3914 (1 << dc->def->nwindows) - 1);
3916 #endif
3918 break;
3919 case 0x33: /* wrtbr, UA2005 wrhpr */
3921 #ifndef TARGET_SPARC64
3922 if (!supervisor(dc))
3923 goto priv_insn;
3924 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3925 #else
3926 CHECK_IU_FEATURE(dc, HYPV);
3927 if (!hypervisor(dc))
3928 goto priv_insn;
3929 cpu_tmp0 = get_temp_tl(dc);
3930 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3931 switch (rd) {
3932 case 0: // hpstate
3933 // XXX gen_op_wrhpstate();
3934 save_state(dc);
3935 gen_op_next_insn();
3936 tcg_gen_exit_tb(0);
3937 dc->is_br = 1;
3938 break;
3939 case 1: // htstate
3940 // XXX gen_op_wrhtstate();
3941 break;
3942 case 3: // hintp
3943 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3944 break;
3945 case 5: // htba
3946 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3947 break;
3948 case 31: // hstick_cmpr
3950 TCGv_ptr r_tickptr;
3952 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3953 r_tickptr = tcg_temp_new_ptr();
3954 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3955 offsetof(CPUSPARCState, hstick));
3956 gen_helper_tick_set_limit(r_tickptr,
3957 cpu_hstick_cmpr);
3958 tcg_temp_free_ptr(r_tickptr);
3960 break;
3961 case 6: // hver readonly
3962 default:
3963 goto illegal_insn;
3965 #endif
3967 break;
3968 #endif
3969 #ifdef TARGET_SPARC64
3970 case 0x2c: /* V9 movcc */
3972 int cc = GET_FIELD_SP(insn, 11, 12);
3973 int cond = GET_FIELD_SP(insn, 14, 17);
3974 DisasCompare cmp;
3975 TCGv dst;
3977 if (insn & (1 << 18)) {
3978 if (cc == 0) {
3979 gen_compare(&cmp, 0, cond, dc);
3980 } else if (cc == 2) {
3981 gen_compare(&cmp, 1, cond, dc);
3982 } else {
3983 goto illegal_insn;
3985 } else {
3986 gen_fcompare(&cmp, cc, cond);
3989 /* The get_src2 above loaded the normal 13-bit
3990 immediate field, not the 11-bit field we have
3991 in movcc. But it did handle the reg case. */
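/* The movcond below uses the old value of rd as its "else" operand,
   which is why rd is loaded with gen_load_gpr rather than freshly
   allocated: a false condition must write the register back unchanged.  */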
3992 if (IS_IMM) {
3993 simm = GET_FIELD_SPs(insn, 0, 10);
3994 tcg_gen_movi_tl(cpu_src2, simm);
3997 dst = gen_load_gpr(dc, rd);
3998 tcg_gen_movcond_tl(cmp.cond, dst,
3999 cmp.c1, cmp.c2,
4000 cpu_src2, dst);
4001 free_compare(&cmp);
4002 gen_store_gpr(dc, rd, dst);
4003 break;
4005 case 0x2d: /* V9 sdivx */
4006 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4007 gen_store_gpr(dc, rd, cpu_dst);
4008 break;
4009 case 0x2e: /* V9 popc */
4010 gen_helper_popc(cpu_dst, cpu_src2);
4011 gen_store_gpr(dc, rd, cpu_dst);
4012 break;
4013 case 0x2f: /* V9 movr */
4015 int cond = GET_FIELD_SP(insn, 10, 12);
4016 DisasCompare cmp;
4017 TCGv dst;
4019 gen_compare_reg(&cmp, cond, cpu_src1);
4021 /* The get_src2 above loaded the normal 13-bit
4022 immediate field, not the 10-bit field we have
4023 in movr. But it did handle the reg case. */
4024 if (IS_IMM) {
4025 simm = GET_FIELD_SPs(insn, 0, 9);
4026 tcg_gen_movi_tl(cpu_src2, simm);
4029 dst = gen_load_gpr(dc, rd);
4030 tcg_gen_movcond_tl(cmp.cond, dst,
4031 cmp.c1, cmp.c2,
4032 cpu_src2, dst);
4033 free_compare(&cmp);
4034 gen_store_gpr(dc, rd, dst);
4035 break;
4037 #endif
4038 default:
4039 goto illegal_insn;
4042 } else if (xop == 0x36) { /* UltraSPARC shutdown, VIS, V8 CPop1 */
4043 #ifdef TARGET_SPARC64
4044 int opf = GET_FIELD_SP(insn, 5, 13);
4045 rs1 = GET_FIELD(insn, 13, 17);
4046 rs2 = GET_FIELD(insn, 27, 31);
4047 if (gen_trap_ifnofpu(dc)) {
4048 goto jmp_insn;
4051 switch (opf) {
4052 case 0x000: /* VIS I edge8cc */
4053 CHECK_FPU_FEATURE(dc, VIS1);
4054 cpu_src1 = gen_load_gpr(dc, rs1);
4055 cpu_src2 = gen_load_gpr(dc, rs2);
4056 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4057 gen_store_gpr(dc, rd, cpu_dst);
4058 break;
4059 case 0x001: /* VIS II edge8n */
4060 CHECK_FPU_FEATURE(dc, VIS2);
4061 cpu_src1 = gen_load_gpr(dc, rs1);
4062 cpu_src2 = gen_load_gpr(dc, rs2);
4063 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4064 gen_store_gpr(dc, rd, cpu_dst);
4065 break;
4066 case 0x002: /* VIS I edge8lcc */
4067 CHECK_FPU_FEATURE(dc, VIS1);
4068 cpu_src1 = gen_load_gpr(dc, rs1);
4069 cpu_src2 = gen_load_gpr(dc, rs2);
4070 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4071 gen_store_gpr(dc, rd, cpu_dst);
4072 break;
4073 case 0x003: /* VIS II edge8ln */
4074 CHECK_FPU_FEATURE(dc, VIS2);
4075 cpu_src1 = gen_load_gpr(dc, rs1);
4076 cpu_src2 = gen_load_gpr(dc, rs2);
4077 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4078 gen_store_gpr(dc, rd, cpu_dst);
4079 break;
4080 case 0x004: /* VIS I edge16cc */
4081 CHECK_FPU_FEATURE(dc, VIS1);
4082 cpu_src1 = gen_load_gpr(dc, rs1);
4083 cpu_src2 = gen_load_gpr(dc, rs2);
4084 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4085 gen_store_gpr(dc, rd, cpu_dst);
4086 break;
4087 case 0x005: /* VIS II edge16n */
4088 CHECK_FPU_FEATURE(dc, VIS2);
4089 cpu_src1 = gen_load_gpr(dc, rs1);
4090 cpu_src2 = gen_load_gpr(dc, rs2);
4091 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4092 gen_store_gpr(dc, rd, cpu_dst);
4093 break;
4094 case 0x006: /* VIS I edge16lcc */
4095 CHECK_FPU_FEATURE(dc, VIS1);
4096 cpu_src1 = gen_load_gpr(dc, rs1);
4097 cpu_src2 = gen_load_gpr(dc, rs2);
4098 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4099 gen_store_gpr(dc, rd, cpu_dst);
4100 break;
4101 case 0x007: /* VIS II edge16ln */
4102 CHECK_FPU_FEATURE(dc, VIS2);
4103 cpu_src1 = gen_load_gpr(dc, rs1);
4104 cpu_src2 = gen_load_gpr(dc, rs2);
4105 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4106 gen_store_gpr(dc, rd, cpu_dst);
4107 break;
4108 case 0x008: /* VIS I edge32cc */
4109 CHECK_FPU_FEATURE(dc, VIS1);
4110 cpu_src1 = gen_load_gpr(dc, rs1);
4111 cpu_src2 = gen_load_gpr(dc, rs2);
4112 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4113 gen_store_gpr(dc, rd, cpu_dst);
4114 break;
4115 case 0x009: /* VIS II edge32n */
4116 CHECK_FPU_FEATURE(dc, VIS2);
4117 cpu_src1 = gen_load_gpr(dc, rs1);
4118 cpu_src2 = gen_load_gpr(dc, rs2);
4119 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4120 gen_store_gpr(dc, rd, cpu_dst);
4121 break;
4122 case 0x00a: /* VIS I edge32lcc */
4123 CHECK_FPU_FEATURE(dc, VIS1);
4124 cpu_src1 = gen_load_gpr(dc, rs1);
4125 cpu_src2 = gen_load_gpr(dc, rs2);
4126 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4127 gen_store_gpr(dc, rd, cpu_dst);
4128 break;
4129 case 0x00b: /* VIS II edge32ln */
4130 CHECK_FPU_FEATURE(dc, VIS2);
4131 cpu_src1 = gen_load_gpr(dc, rs1);
4132 cpu_src2 = gen_load_gpr(dc, rs2);
4133 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4134 gen_store_gpr(dc, rd, cpu_dst);
4135 break;
4136 case 0x010: /* VIS I array8 */
4137 CHECK_FPU_FEATURE(dc, VIS1);
4138 cpu_src1 = gen_load_gpr(dc, rs1);
4139 cpu_src2 = gen_load_gpr(dc, rs2);
4140 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4141 gen_store_gpr(dc, rd, cpu_dst);
4142 break;
4143 case 0x012: /* VIS I array16 */
4144 CHECK_FPU_FEATURE(dc, VIS1);
4145 cpu_src1 = gen_load_gpr(dc, rs1);
4146 cpu_src2 = gen_load_gpr(dc, rs2);
4147 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4148 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4149 gen_store_gpr(dc, rd, cpu_dst);
4150 break;
4151 case 0x014: /* VIS I array32 */
4152 CHECK_FPU_FEATURE(dc, VIS1);
4153 cpu_src1 = gen_load_gpr(dc, rs1);
4154 cpu_src2 = gen_load_gpr(dc, rs2);
4155 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4156 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4157 gen_store_gpr(dc, rd, cpu_dst);
4158 break;
4159 case 0x018: /* VIS I alignaddr */
4160 CHECK_FPU_FEATURE(dc, VIS1);
4161 cpu_src1 = gen_load_gpr(dc, rs1);
4162 cpu_src2 = gen_load_gpr(dc, rs2);
4163 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4164 gen_store_gpr(dc, rd, cpu_dst);
4165 break;
4166 case 0x01a: /* VIS I alignaddrl */
4167 CHECK_FPU_FEATURE(dc, VIS1);
4168 cpu_src1 = gen_load_gpr(dc, rs1);
4169 cpu_src2 = gen_load_gpr(dc, rs2);
4170 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4171 gen_store_gpr(dc, rd, cpu_dst);
4172 break;
4173 case 0x019: /* VIS II bmask */
4174 CHECK_FPU_FEATURE(dc, VIS2);
4175 cpu_src1 = gen_load_gpr(dc, rs1);
4176 cpu_src2 = gen_load_gpr(dc, rs2);
4177 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4178 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
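/* The deposit above stores the 32-bit sum into GSR bits 63:32, the
   mask field that a later bshuffle consumes.  */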
4179 gen_store_gpr(dc, rd, cpu_dst);
4180 break;
4181 case 0x020: /* VIS I fcmple16 */
4182 CHECK_FPU_FEATURE(dc, VIS1);
4183 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4184 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4185 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4186 gen_store_gpr(dc, rd, cpu_dst);
4187 break;
4188 case 0x022: /* VIS I fcmpne16 */
4189 CHECK_FPU_FEATURE(dc, VIS1);
4190 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4191 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4192 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4193 gen_store_gpr(dc, rd, cpu_dst);
4194 break;
4195 case 0x024: /* VIS I fcmple32 */
4196 CHECK_FPU_FEATURE(dc, VIS1);
4197 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4198 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4199 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4200 gen_store_gpr(dc, rd, cpu_dst);
4201 break;
4202 case 0x026: /* VIS I fcmpne32 */
4203 CHECK_FPU_FEATURE(dc, VIS1);
4204 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4205 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4206 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4207 gen_store_gpr(dc, rd, cpu_dst);
4208 break;
4209 case 0x028: /* VIS I fcmpgt16 */
4210 CHECK_FPU_FEATURE(dc, VIS1);
4211 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4212 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4213 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4214 gen_store_gpr(dc, rd, cpu_dst);
4215 break;
4216 case 0x02a: /* VIS I fcmpeq16 */
4217 CHECK_FPU_FEATURE(dc, VIS1);
4218 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4219 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4220 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4221 gen_store_gpr(dc, rd, cpu_dst);
4222 break;
4223 case 0x02c: /* VIS I fcmpgt32 */
4224 CHECK_FPU_FEATURE(dc, VIS1);
4225 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4226 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4227 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4228 gen_store_gpr(dc, rd, cpu_dst);
4229 break;
4230 case 0x02e: /* VIS I fcmpeq32 */
4231 CHECK_FPU_FEATURE(dc, VIS1);
4232 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4233 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4234 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4235 gen_store_gpr(dc, rd, cpu_dst);
4236 break;
4237 case 0x031: /* VIS I fmul8x16 */
4238 CHECK_FPU_FEATURE(dc, VIS1);
4239 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4240 break;
4241 case 0x033: /* VIS I fmul8x16au */
4242 CHECK_FPU_FEATURE(dc, VIS1);
4243 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4244 break;
4245 case 0x035: /* VIS I fmul8x16al */
4246 CHECK_FPU_FEATURE(dc, VIS1);
4247 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4248 break;
4249 case 0x036: /* VIS I fmul8sux16 */
4250 CHECK_FPU_FEATURE(dc, VIS1);
4251 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4252 break;
4253 case 0x037: /* VIS I fmul8ulx16 */
4254 CHECK_FPU_FEATURE(dc, VIS1);
4255 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4256 break;
4257 case 0x038: /* VIS I fmuld8sux16 */
4258 CHECK_FPU_FEATURE(dc, VIS1);
4259 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4260 break;
4261 case 0x039: /* VIS I fmuld8ulx16 */
4262 CHECK_FPU_FEATURE(dc, VIS1);
4263 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4264 break;
4265 case 0x03a: /* VIS I fpack32 */
4266 CHECK_FPU_FEATURE(dc, VIS1);
4267 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4268 break;
4269 case 0x03b: /* VIS I fpack16 */
4270 CHECK_FPU_FEATURE(dc, VIS1);
4271 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4272 cpu_dst_32 = gen_dest_fpr_F(dc);
4273 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4274 gen_store_fpr_F(dc, rd, cpu_dst_32);
4275 break;
4276 case 0x03d: /* VIS I fpackfix */
4277 CHECK_FPU_FEATURE(dc, VIS1);
4278 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4279 cpu_dst_32 = gen_dest_fpr_F(dc);
4280 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4281 gen_store_fpr_F(dc, rd, cpu_dst_32);
4282 break;
4283 case 0x03e: /* VIS I pdist */
4284 CHECK_FPU_FEATURE(dc, VIS1);
4285 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4286 break;
4287 case 0x048: /* VIS I faligndata */
4288 CHECK_FPU_FEATURE(dc, VIS1);
4289 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4290 break;
4291 case 0x04b: /* VIS I fpmerge */
4292 CHECK_FPU_FEATURE(dc, VIS1);
4293 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4294 break;
4295 case 0x04c: /* VIS II bshuffle */
4296 CHECK_FPU_FEATURE(dc, VIS2);
4297 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4298 break;
4299 case 0x04d: /* VIS I fexpand */
4300 CHECK_FPU_FEATURE(dc, VIS1);
4301 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4302 break;
4303 case 0x050: /* VIS I fpadd16 */
4304 CHECK_FPU_FEATURE(dc, VIS1);
4305 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4306 break;
4307 case 0x051: /* VIS I fpadd16s */
4308 CHECK_FPU_FEATURE(dc, VIS1);
4309 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4310 break;
4311 case 0x052: /* VIS I fpadd32 */
4312 CHECK_FPU_FEATURE(dc, VIS1);
4313 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4314 break;
4315 case 0x053: /* VIS I fpadd32s */
4316 CHECK_FPU_FEATURE(dc, VIS1);
4317 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4318 break;
4319 case 0x054: /* VIS I fpsub16 */
4320 CHECK_FPU_FEATURE(dc, VIS1);
4321 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4322 break;
4323 case 0x055: /* VIS I fpsub16s */
4324 CHECK_FPU_FEATURE(dc, VIS1);
4325 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4326 break;
4327 case 0x056: /* VIS I fpsub32 */
4328 CHECK_FPU_FEATURE(dc, VIS1);
4329 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4330 break;
4331 case 0x057: /* VIS I fpsub32s */
4332 CHECK_FPU_FEATURE(dc, VIS1);
4333 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4334 break;
            case 0x060: /* VIS I fzero */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, 0);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x061: /* VIS I fzeros */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, 0);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x062: /* VIS I fnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                break;
            case 0x063: /* VIS I fnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                break;
            case 0x064: /* VIS I fandnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                break;
            case 0x065: /* VIS I fandnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                break;
            case 0x066: /* VIS I fnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                break;
            case 0x067: /* VIS I fnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                break;
            case 0x068: /* VIS I fandnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                break;
            case 0x069: /* VIS I fandnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                break;
            case 0x06a: /* VIS I fnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                break;
            case 0x06b: /* VIS I fnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                break;
            case 0x06c: /* VIS I fxor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                break;
            case 0x06d: /* VIS I fxors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                break;
            case 0x06e: /* VIS I fnand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                break;
            case 0x06f: /* VIS I fnands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                break;
            case 0x070: /* VIS I fand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                break;
            case 0x071: /* VIS I fands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                break;
            case 0x072: /* VIS I fxnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                break;
            case 0x073: /* VIS I fxnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                break;
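            /*
             * The VIS logical ops map one-to-one onto TCG logical ops:
             * fnor -> nor, fandnot2 -> andc(rs1, rs2), fxnor -> eqv, etc.
             * fandnot1/fornot1 reuse andc/orc with rs1 and rs2 swapped,
             * since e.g. fandnot1 computes ~rs1 & rs2.
             */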
            case 0x074: /* VIS I fsrc1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x075: /* VIS I fsrc1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x076: /* VIS I fornot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                break;
            case 0x077: /* VIS I fornot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                break;
            case 0x078: /* VIS I fsrc2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x079: /* VIS I fsrc2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x07a: /* VIS I fornot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                break;
            case 0x07b: /* VIS I fornot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                break;
            case 0x07c: /* VIS I for */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                break;
            case 0x07d: /* VIS I fors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                break;
            case 0x07e: /* VIS I fone */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, -1);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x07f: /* VIS I fones */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, -1);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x080: /* VIS I shutdown */
            case 0x081: /* VIS II siam */
                // XXX
                goto illegal_insn;
            default:
                goto illegal_insn;
            }
#else
            goto ncp_insn;
#endif
        } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
            goto illegal_insn;
#else
            goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
        } else if (xop == 0x39) { /* V9 return */
            TCGv_i32 r_const;

            save_state(dc);
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            gen_helper_restore(cpu_env);
            gen_mov_pc_npc(dc);
            r_const = tcg_const_i32(3);
            gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
            tcg_temp_free_i32(r_const);
            tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
            dc->npc = DYNAMIC_PC;
            goto jmp_insn;
#endif
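        /*
         * V9 'return' behaves like restore followed by a jump with no link
         * register: the window is rotated back, then pc <- npc and npc <-
         * the computed target, which must be 4-byte aligned (hence the
         * check_align with mask 3).  The target is only known at run time,
         * so npc is marked DYNAMIC_PC.
         */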
        } else {
            cpu_src1 = get_src1(dc, insn);
            cpu_tmp0 = get_temp_tl(dc);
            if (IS_IMM) {   /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
            } else {        /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2) {
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                } else {
                    tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                }
            }
            switch (xop) {
            case 0x38:      /* jmpl */
                {
                    TCGv t;
                    TCGv_i32 r_const;

                    t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, dc->pc);
                    gen_store_gpr(dc, rd, t);
                    gen_mov_pc_npc(dc);
                    r_const = tcg_const_i32(3);
                    gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                    tcg_temp_free_i32(r_const);
                    gen_address_mask(dc, cpu_tmp0);
                    tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                }
                goto jmp_insn;
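            /*
             * jmpl writes the address of the jmpl itself into rd before
             * the jump, so "jmpl %i7+8, %g0" is the synthetic 'ret',
             * "jmpl %o7+8, %g0" is 'retl', and an indirect call is jmpl
             * with rd = %o7.  The target is computed at run time, hence
             * npc = DYNAMIC_PC.
             */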
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
            case 0x39:      /* rett, V9 return */
                {
                    TCGv_i32 r_const;

                    if (!supervisor(dc))
                        goto priv_insn;
                    gen_mov_pc_npc(dc);
                    r_const = tcg_const_i32(3);
                    gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
                    tcg_temp_free_i32(r_const);
                    tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                    dc->npc = DYNAMIC_PC;
                    gen_helper_rett(cpu_env);
                }
                goto jmp_insn;
#endif
            case 0x3b: /* flush */
                if (!((dc)->def->features & CPU_FEATURE_FLUSH))
                    goto unimp_flush;
                /* nop */
                break;
            case 0x3c:      /* save */
                save_state(dc);
                gen_helper_save(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
            case 0x3d:      /* restore */
                save_state(dc);
                gen_helper_restore(cpu_env);
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
            case 0x3e:      /* V9 done/retry */
                {
                    switch (rd) {
                    case 0:
                        if (!supervisor(dc))
                            goto priv_insn;
                        dc->npc = DYNAMIC_PC;
                        dc->pc = DYNAMIC_PC;
                        gen_helper_done(cpu_env);
                        goto jmp_insn;
                    case 1:
                        if (!supervisor(dc))
                            goto priv_insn;
                        dc->npc = DYNAMIC_PC;
                        dc->pc = DYNAMIC_PC;
                        gen_helper_retry(cpu_env);
                        goto jmp_insn;
                    default:
                        goto illegal_insn;
                    }
                }
                break;
#endif
            default:
                goto illegal_insn;
            }
        }
        break;
    }
    break;
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = get_temp_tl(dc);

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {    /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else {                /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
                case 0x0:       /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1:       /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2:       /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x3:       /* ldd, load double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;

                        save_state(dc);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
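                /*
                 * ldd requires an even rd and fills the pair: the word at
                 * the lower address goes to rd, the word at addr+4 to
                 * rd+1.  The high half of t64 is left in cpu_val here and
                 * reaches rd through the common gen_store_gpr() after the
                 * switch.
                 */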
                case 0x9:       /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa:       /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xd:       /* ldstub -- XXX: should be atomic */
                    {
                        TCGv r_const;

                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                        r_const = tcg_const_tl(0xff);
                        tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
                        tcg_temp_free(r_const);
                    }
                    break;
                case 0x0f:
                    /* swap, swap register with memory.  Also atomic.  */
                    {
                        TCGv t0 = get_temp_tl(dc);
                        CHECK_IU_FEATURE(dc, SWAP);
                        cpu_src1 = gen_load_gpr(dc, rd);
                        gen_address_mask(dc, cpu_addr);
                        tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                        tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
                        tcg_gen_mov_tl(cpu_val, t0);
                    }
                    break;
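                /*
                 * Like ldstub above, swap is emulated as a separate load
                 * and store; this appears to be safe only because TCG in
                 * this tree executes guest CPUs one at a time, hence the
                 * XXX notes about atomicity.
                 */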
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:      /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
                    break;
                case 0x11:      /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
                    break;
                case 0x12:      /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
                    break;
                case 0x13:      /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    save_state(dc);
                    gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19:      /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
                    break;
                case 0x1a:      /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
                    break;
                case 0x1d:      /* ldstuba -- XXX: should be atomic */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_ldstub_asi(cpu_val, cpu_addr, insn);
                    break;
                case 0x1f:      /* swapa, swap reg with alt. memory.  Also
                                   atomic. */
                    CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
                    break;

#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
                    break;
                case 0x1b: /* V9 ldxa */
                    save_state(dc);
                    gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    save_state(dc);
                    gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                TCGv t0;

                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                switch (xop) {
                case 0x20:      /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21:      /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        gen_helper_ldxfsr(cpu_env, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    t0 = get_temp_tl(dc);
                    tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
                    tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
                    gen_helper_ldfsr(cpu_env, cpu_dst_32);
                    break;
                case 0x22:      /* ldqf, load quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_ldqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_op_store_QT0_fpr(QFPREG(rd));
                        gen_update_fprs_dirty(QFPREG(rd));
                    }
                    break;
                case 0x23:      /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);

                switch (xop) {
                case 0x4:       /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x5:       /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x6:       /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x7:       /* std, store double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i32 r_const;
                        TCGv_i64 t64;
                        TCGv lo;

                        save_state(dc);
                        gen_address_mask(dc, cpu_addr);
                        r_const = tcg_const_i32(7);
                        /* XXX remove alignment check */
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        lo = gen_load_gpr(dc, rd + 1);

                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                        tcg_temp_free_i64(t64);
                    }
                    break;
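                /*
                 * std mirrors ldd: the even register rd supplies the
                 * most-significant word, rd+1 the least-significant one,
                 * and tcg_gen_concat_tl_i64() glues them into a single
                 * 64-bit big-endian store, so rd lands at the lower
                 * address.
                 */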
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14:      /* sta, V9 stwa, store word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 4);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x15:      /* stba, store byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 1);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x16:      /* stha, store halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 2);
                    dc->npc = DYNAMIC_PC;
                    break;
                case 0x17:      /* stda, store double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        save_state(dc);
                        gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    }
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e:      /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1e:      /* V9 stxa */
                    save_state(dc);
                    gen_st_asi(cpu_val, cpu_addr, insn, 8);
                    dc->npc = DYNAMIC_PC;
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                save_state(dc);
                switch (xop) {
                case 0x24:      /* stf, store fpreg */
                    {
                        TCGv t = get_temp_tl(dc);
                        gen_address_mask(dc, cpu_addr);
                        cpu_src1_32 = gen_load_fpr_F(dc, rd);
                        tcg_gen_ext_i32_tl(t, cpu_src1_32);
                        tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                    }
                    break;
                case 0x25:      /* stfsr, V9 stxfsr */
                    {
                        TCGv t = get_temp_tl(dc);

                        tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
#ifdef TARGET_SPARC64
                        gen_address_mask(dc, cpu_addr);
                        if (rd == 1) {
                            tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
                            break;
                        }
#endif
                        tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
                    }
                    break;
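                /*
                 * In the case above, rd == 1 selects V9 stxfsr, which
                 * stores all 64 bits of the FSR; the fall-through path
                 * stores only the low 32 bits (stfsr), the only form that
                 * exists on 32-bit SPARC.
                 */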
                case 0x26:
#ifdef TARGET_SPARC64
                    /* V9 stqf, store quad fpreg */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        gen_op_load_fpr_QT0(QFPREG(rd));
                        r_const = tcg_const_i32(dc->mem_idx);
                        gen_address_mask(dc, cpu_addr);
                        gen_helper_stqf(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                    }
                    break;
#else /* !TARGET_SPARC64 */
                    /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                    goto illegal_insn;
#else
                    if (!supervisor(dc))
                        goto priv_insn;
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    goto nfq_insn;
#endif
#endif
                case 0x27:      /* stdf, store double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
                    tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x33 && xop < 0x3f) {
                save_state(dc);
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34:      /* V9 stfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(cpu_addr, insn, 4, rd);
                    break;
                case 0x36:      /* V9 stqfa */
                    {
                        TCGv_i32 r_const;

                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        if (gen_trap_ifnofpu(dc)) {
                            goto jmp_insn;
                        }
                        r_const = tcg_const_i32(7);
                        gen_helper_check_align(cpu_env, cpu_addr, r_const);
                        tcg_temp_free_i32(r_const);
                        gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
                    }
                    break;
                case 0x37:      /* V9 stdfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
                    break;
                case 0x3e:      /* V9 casxa */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#else
                case 0x34: /* stc */
                case 0x35: /* stcsr */
                case 0x36: /* stdcq */
                case 0x37: /* stdc */
                    goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x3c:      /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                    CHECK_IU_FEATURE(dc, CASA);
                    if (IS_IMM) {
                        goto illegal_insn;
                    }
                    if (!supervisor(dc)) {
                        goto priv_insn;
                    }
#endif
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
 jmp_insn:
    goto egress;
 illegal_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_ILL_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
 unimp_flush:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_UNIMP_FLUSH);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_PRIV_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 nfpu_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
    dc->is_br = 1;
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
    dc->is_br = 1;
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    {
        /* Was declared as plain TCGv; tcg_const_i32() returns a TCGv_i32,
           so use the matching type and free function.  */
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NCP_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
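/*
 * Note on temporary management: get_temp_tl()/get_temp_i32() record every
 * temporary they hand out in dc->ttl[]/dc->t32[], and the egress path
 * above frees them in one place.  This keeps the decoder free of per-case
 * tcg_temp_free() calls for short-lived values.  A minimal sketch of the
 * allocation side (the real helpers live earlier in this file):
 *
 *     static TCGv get_temp_tl(DisasContext *dc)
 *     {
 *         TCGv t = tcg_temp_new();
 *         assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
 *         dc->ttl[dc->n_ttl++] = t;
 *         return t;
 *     }
 */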
static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool spc)
{
    CPUState *cs = CPU(cpu);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc_start, last_pc;
    uint16_t *gen_opc_end;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int num_insns;
    int max_insns;
    unsigned int insn;

    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = cpu_mmu_index(env);
    dc->def = env->def;
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (dc->pc != pc_start)
                        save_state(dc);
                    gen_helper_debug(cpu_env);
                    tcg_gen_exit_tb(0);
                    dc->is_br = 1;
                    goto exit_gen_loop;
                }
            }
        }
        if (spc) {
            qemu_log("Search PC...\n");
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                tcg_ctx.gen_opc_pc[lj] = dc->pc;
                gen_opc_npc[lj] = dc->npc;
                tcg_ctx.gen_opc_instr_start[lj] = 1;
                tcg_ctx.gen_opc_icount[lj] = num_insns;
            }
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        last_pc = dc->pc;
        insn = cpu_ldl_code(env, dc->pc);

        disas_sparc_insn(dc, insn);
        num_insns++;

        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while ((tcg_ctx.gen_opc_ptr < gen_opc_end) &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (spc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
#if 0
        log_page_dump();
#endif
        gen_opc_jump_pc[0] = dc->jump_pc[0];
        gen_opc_jump_pc[1] = dc->jump_pc[1];
    } else {
        tb->size = last_pc + 4 - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
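
/*
 * Chaining note: a TB ends with gen_goto_tb() (and so can later be patched
 * to jump straight into its successor) only when both dc->pc and dc->npc
 * are compile-time constants at the end of the loop above.  If either is
 * DYNAMIC_PC/JUMP_PC, the values are flushed to cpu_pc/cpu_npc and the TB
 * exits through tcg_gen_exit_tb(0) so the main loop can look up the next
 * TB at run time.
 */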
void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb)
{
    gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, true);
}
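
/*
 * The two entry points differ only in the spc flag: plain translation runs
 * with spc == false, while gen_intermediate_code_pc() re-translates an
 * already-executed TB with spc == true so that the gen_opc_* side tables
 * (pc, npc and icount per guest instruction) are filled in for
 * restore_state_to_opc() below.
 */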
void gen_intermediate_code_init(CPUSPARCState *env)
{
    unsigned int i;
    static int inited;
    static const char * const gregnames[8] = {
        NULL, // g0 not used
        "g1",
        "g2",
        "g3",
        "g4",
        "g5",
        "g6",
        "g7",
    };
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* init various static tables */
    if (!inited) {
        inited = 1;

        cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
        cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
                                             offsetof(CPUSPARCState, regwptr),
                                             "regwptr");
#ifdef TARGET_SPARC64
        cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, xcc),
                                         "xcc");
        cpu_asi = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, asi),
                                         "asi");
        cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUSPARCState, fprs),
                                          "fprs");
        cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
                                     "gsr");
        cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                           offsetof(CPUSPARCState, tick_cmpr),
                                           "tick_cmpr");
        cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                            offsetof(CPUSPARCState, stick_cmpr),
                                            "stick_cmpr");
        cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                             offsetof(CPUSPARCState, hstick_cmpr),
                                             "hstick_cmpr");
        cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
                                       "hintp");
        cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
                                      "htba");
        cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
                                      "hver");
        cpu_ssr = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, ssr), "ssr");
        cpu_ver = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, version), "ver");
        cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUSPARCState, softint),
                                             "softint");
#else
        cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
                                     "wim");
#endif
        cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
                                      "cond");
        cpu_cc_src = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUSPARCState, cc_src),
                                        "cc_src");
        cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUSPARCState, cc_src2),
                                         "cc_src2");
        cpu_cc_dst = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUSPARCState, cc_dst),
                                        "cc_dst");
        cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                           offsetof(CPUSPARCState, cc_op),
                                           "cc_op");
        cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
                                         "psr");
        cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
                                     "fsr");
        cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
                                    "pc");
        cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
                                     "npc");
        cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
#ifndef CONFIG_USER_ONLY
        cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
                                     "tbr");
#endif
        for (i = 1; i < 8; i++) {
            cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
                                              offsetof(CPUSPARCState, gregs[i]),
                                              gregnames[i]);
        }
        for (i = 0; i < TARGET_DPREGS; i++) {
            cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                                offsetof(CPUSPARCState, fpr[i]),
                                                fregnames[i]);
        }
    }
}
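
/*
 * Note that gregnames[0] is NULL and the greg loop above starts at 1: %g0
 * is the architectural zero register, so it never becomes a TCG global;
 * reads of g0 are materialized as the constant 0 by the register-access
 * helpers earlier in this file.
 */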
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos)
{
    target_ulong npc;
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
    npc = gen_opc_npc[pc_pos];
    if (npc == 1) {
        /* dynamic NPC: already stored */
    } else if (npc == 2) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = gen_opc_jump_pc[0];
        } else {
            env->npc = gen_opc_jump_pc[1];
        }
    } else {
        env->npc = npc;