4 * Copyright (c) 2015 Chen Gang
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see
18 * <http://www.gnu.org/licenses/lgpl-2.1.html>
23 #include "disas/disas.h"
25 #include "exec/cpu_ldst.h"
26 #include "opcode_tilegx.h"
27 #include "spr_def_64.h"
29 #define FMT64X "%016" PRIx64
31 static TCGv_ptr cpu_env
;
33 static TCGv cpu_regs
[TILEGX_R_COUNT
];
/* Disassembly names for all 64 architectural registers.  Registers 52-63
   are the special-purpose bp/tp/sp/lr, the network demux registers, and
   zero.  Bug fix: register 62 is "udn3" -- the previous copy listed
   "udn2" twice, which would mislabel udn3 in TB_IN_ASM logging.  */
static const char * const reg_names[64] = {
     "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
     "r8",  "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
    "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
    "r48", "r49", "r50", "r51",  "bp",  "tp",  "sp",  "lr",
    "sn", "idn0", "idn1", "udn0", "udn1", "udn2", "udn3", "zero"
};
46 /* Modified registers are cached in temporaries until the end of the bundle. */
52 #define MAX_WRITEBACK 4
54 /* This is the state at translation time. */
56 uint64_t pc
; /* Current pc */
58 TCGv zero
; /* For zero register */
60 DisasContextTemp wb
[MAX_WRITEBACK
];
67 TCGCond cond
; /* branch condition */
68 TCGv dest
; /* branch destination */
69 TCGv val1
; /* value to be compared against zero, for cond */
70 } jmp
; /* Jump object, only once in each TB block */
73 #include "exec/gen-icount.h"
75 /* Differentiate the various pipe encodings. */
81 /* Remerge the base opcode and extension fields for switching.
82 The X opcode fields are 3 bits; Y0/Y1 opcode fields are 4 bits;
83 Y2 opcode field is 2 bits. */
84 #define OE(OP, EXT, XY) (TY_##XY + OP * 4 + EXT * 64)
86 /* Similar, but for Y2 only. */
87 #define OEY2(OP, MODE) (OP + MODE * 4)
89 /* Similar, but make sure opcode names match up. */
90 #define OE_RR_X0(E) OE(RRR_0_OPCODE_X0, E##_UNARY_OPCODE_X0, X0)
91 #define OE_RR_X1(E) OE(RRR_0_OPCODE_X1, E##_UNARY_OPCODE_X1, X1)
92 #define OE_RR_Y0(E) OE(RRR_1_OPCODE_Y0, E##_UNARY_OPCODE_Y0, Y0)
93 #define OE_RR_Y1(E) OE(RRR_1_OPCODE_Y1, E##_UNARY_OPCODE_Y1, Y1)
94 #define OE_RRR(E,N,XY) OE(RRR_##N##_OPCODE_##XY, E##_RRR_##N##_OPCODE_##XY, XY)
95 #define OE_IM(E,XY) OE(IMM8_OPCODE_##XY, E##_IMM8_OPCODE_##XY, XY)
96 #define OE_SH(E,XY) OE(SHIFT_OPCODE_##XY, E##_SHIFT_OPCODE_##XY, XY)
98 #define V1_IMM(X) (((X) & 0xff) * 0x0101010101010101ull)
101 static void gen_exception(DisasContext
*dc
, TileExcp num
)
105 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ TILEGX_BUNDLE_SIZE_IN_BYTES
);
107 tmp
= tcg_const_i32(num
);
108 gen_helper_exception(cpu_env
, tmp
);
109 tcg_temp_free_i32(tmp
);
113 static bool check_gr(DisasContext
*dc
, uint8_t reg
)
115 if (likely(reg
< TILEGX_R_COUNT
)) {
125 gen_exception(dc
, TILEGX_EXCP_REG_IDN_ACCESS
);
131 gen_exception(dc
, TILEGX_EXCP_REG_UDN_ACCESS
);
134 g_assert_not_reached();
139 static TCGv
load_zero(DisasContext
*dc
)
141 if (TCGV_IS_UNUSED_I64(dc
->zero
)) {
142 dc
->zero
= tcg_const_i64(0);
147 static TCGv
load_gr(DisasContext
*dc
, unsigned reg
)
149 if (check_gr(dc
, reg
)) {
150 return cpu_regs
[reg
];
152 return load_zero(dc
);
155 static TCGv
dest_gr(DisasContext
*dc
, unsigned reg
)
159 /* Skip the result, mark the exception if necessary, and continue */
164 return dc
->wb
[n
].val
= tcg_temp_new_i64();
167 static void gen_saturate_op(TCGv tdest
, TCGv tsrca
, TCGv tsrcb
,
168 void (*operate
)(TCGv
, TCGv
, TCGv
))
170 TCGv t0
= tcg_temp_new();
172 tcg_gen_ext32s_tl(tdest
, tsrca
);
173 tcg_gen_ext32s_tl(t0
, tsrcb
);
174 operate(tdest
, tdest
, t0
);
176 tcg_gen_movi_tl(t0
, 0x7fffffff);
177 tcg_gen_movcond_tl(TCG_COND_GT
, tdest
, tdest
, t0
, t0
, tdest
);
178 tcg_gen_movi_tl(t0
, -0x80000000LL
);
179 tcg_gen_movcond_tl(TCG_COND_LT
, tdest
, tdest
, t0
, t0
, tdest
);
184 static void gen_atomic_excp(DisasContext
*dc
, unsigned dest
, TCGv tdest
,
185 TCGv tsrca
, TCGv tsrcb
, TileExcp excp
)
187 #ifdef CONFIG_USER_ONLY
190 tcg_gen_st_tl(tsrca
, cpu_env
, offsetof(CPUTLGState
, atomic_srca
));
191 tcg_gen_st_tl(tsrcb
, cpu_env
, offsetof(CPUTLGState
, atomic_srcb
));
192 t
= tcg_const_i32(dest
);
193 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUTLGState
, atomic_dstr
));
194 tcg_temp_free_i32(t
);
196 /* We're going to write the real result in the exception. But in
197 the meantime we've already created a writeback register, and
198 we don't want that to remain uninitialized. */
199 tcg_gen_movi_tl(tdest
, 0);
201 /* Note that we need to delay issuing the exception that implements
202 the atomic operation until after writing back the results of the
203 instruction occupying the X0 pipe. */
204 dc
->atomic_excp
= excp
;
206 gen_exception(dc
, TILEGX_EXCP_OPCODE_UNIMPLEMENTED
);
210 /* Shift the 128-bit value TSRCA:TSRCD right by the number of bytes
211 specified by the bottom 3 bits of TSRCB, and set TDEST to the
212 low 64 bits of the resulting value. */
213 static void gen_dblalign(TCGv tdest
, TCGv tsrcd
, TCGv tsrca
, TCGv tsrcb
)
215 TCGv t0
= tcg_temp_new();
217 tcg_gen_andi_tl(t0
, tsrcb
, 7);
218 tcg_gen_shli_tl(t0
, t0
, 3);
219 tcg_gen_shr_tl(tdest
, tsrcd
, t0
);
221 /* We want to do "t0 = tsrca << (64 - t0)". Two's complement
222 arithmetic on a 6-bit field tells us that 64 - t0 is equal
223 to (t0 ^ 63) + 1. So we can do the shift in two parts,
224 neither of which will be an invalid shift by 64. */
225 tcg_gen_xori_tl(t0
, t0
, 63);
226 tcg_gen_shl_tl(t0
, tsrca
, t0
);
227 tcg_gen_shli_tl(t0
, t0
, 1);
228 tcg_gen_or_tl(tdest
, tdest
, t0
);
233 /* Similarly, except that the 128-bit value is TSRCA:TSRCB, and the
234 right shift is an immediate. */
235 static void gen_dblaligni(TCGv tdest
, TCGv tsrca
, TCGv tsrcb
, int shr
)
237 TCGv t0
= tcg_temp_new();
239 tcg_gen_shri_tl(t0
, tsrcb
, shr
);
240 tcg_gen_shli_tl(tdest
, tsrca
, 64 - shr
);
241 tcg_gen_or_tl(tdest
, tdest
, t0
);
250 static void gen_ext_half(TCGv d
, TCGv s
, MulHalf h
)
254 tcg_gen_ext32u_tl(d
, s
);
257 tcg_gen_ext32s_tl(d
, s
);
260 tcg_gen_shri_tl(d
, s
, 32);
263 tcg_gen_sari_tl(d
, s
, 32);
268 static void gen_mul_half(TCGv tdest
, TCGv tsrca
, TCGv tsrcb
,
269 MulHalf ha
, MulHalf hb
)
271 TCGv t
= tcg_temp_new();
272 gen_ext_half(t
, tsrca
, ha
);
273 gen_ext_half(tdest
, tsrcb
, hb
);
274 tcg_gen_mul_tl(tdest
, tdest
, t
);
278 /* Equality comparison with zero can be done quickly and efficiently. */
279 static void gen_v1cmpeq0(TCGv v
)
281 TCGv m
= tcg_const_tl(V1_IMM(0x7f));
282 TCGv c
= tcg_temp_new();
284 /* ~(((v & m) + m) | m | v). Sets the msb for each byte == 0. */
285 tcg_gen_and_tl(c
, v
, m
);
286 tcg_gen_add_tl(c
, c
, m
);
287 tcg_gen_or_tl(c
, c
, m
);
288 tcg_gen_nor_tl(c
, c
, v
);
291 /* Shift the msb down to form the lsb boolean result. */
292 tcg_gen_shri_tl(v
, c
, 7);
296 static void gen_v1cmpne0(TCGv v
)
298 TCGv m
= tcg_const_tl(V1_IMM(0x7f));
299 TCGv c
= tcg_temp_new();
301 /* (((v & m) + m) | v) & ~m. Sets the msb for each byte != 0. */
302 tcg_gen_and_tl(c
, v
, m
);
303 tcg_gen_add_tl(c
, c
, m
);
304 tcg_gen_or_tl(c
, c
, v
);
305 tcg_gen_andc_tl(c
, c
, m
);
308 /* Shift the msb down to form the lsb boolean result. */
309 tcg_gen_shri_tl(v
, c
, 7);
313 static TileExcp
gen_st_opcode(DisasContext
*dc
, unsigned dest
, unsigned srca
,
314 unsigned srcb
, TCGMemOp memop
, const char *name
)
317 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
320 tcg_gen_qemu_st_tl(load_gr(dc
, srcb
), load_gr(dc
, srca
),
323 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "%s %s, %s", name
,
324 reg_names
[srca
], reg_names
[srcb
]);
325 return TILEGX_EXCP_NONE
;
328 static TileExcp
gen_st_add_opcode(DisasContext
*dc
, unsigned srca
, unsigned srcb
,
329 int imm
, TCGMemOp memop
, const char *name
)
331 TCGv tsrca
= load_gr(dc
, srca
);
332 TCGv tsrcb
= load_gr(dc
, srcb
);
334 tcg_gen_qemu_st_tl(tsrcb
, tsrca
, dc
->mmuidx
, memop
);
335 tcg_gen_addi_tl(dest_gr(dc
, srca
), tsrca
, imm
);
337 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "%s %s, %s, %d", name
,
338 reg_names
[srca
], reg_names
[srcb
], imm
);
339 return TILEGX_EXCP_NONE
;
342 static TileExcp
gen_rr_opcode(DisasContext
*dc
, unsigned opext
,
343 unsigned dest
, unsigned srca
)
346 const char *mnemonic
;
348 TileExcp ret
= TILEGX_EXCP_NONE
;
350 /* Eliminate instructions with no output before doing anything else. */
364 case OE_RR_X1(DRAIN
):
367 case OE_RR_X1(FLUSHWB
):
368 mnemonic
= "flushwb";
372 mnemonic
= (dest
== 0x1c && srca
== 0x25 ? "bpt" : "ill");
373 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "%s", mnemonic
);
374 return TILEGX_EXCP_OPCODE_UNKNOWN
;
379 /* ??? This should yield, especially in system mode. */
382 case OE_RR_X1(SWINT0
):
383 case OE_RR_X1(SWINT2
):
384 case OE_RR_X1(SWINT3
):
385 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
386 case OE_RR_X1(SWINT1
):
387 ret
= TILEGX_EXCP_SYSCALL
;
391 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
393 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "%s", mnemonic
);
396 case OE_RR_X1(DTLBPR
):
397 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
401 case OE_RR_X1(FLUSH
):
421 case OE_RR_X1(JALRP
):
422 case OE_RR_Y1(JALRP
):
429 tcg_gen_movi_tl(dest_gr(dc
, TILEGX_R_LR
),
430 dc
->pc
+ TILEGX_BUNDLE_SIZE_IN_BYTES
);
432 dc
->jmp
.cond
= TCG_COND_ALWAYS
;
433 dc
->jmp
.dest
= tcg_temp_new();
434 tcg_gen_andi_tl(dc
->jmp
.dest
, load_gr(dc
, srca
), ~7);
437 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
439 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "%s %s", mnemonic
, reg_names
[srca
]);
443 tdest
= dest_gr(dc
, dest
);
444 tsrca
= load_gr(dc
, srca
);
447 case OE_RR_X0(CNTLZ
):
448 case OE_RR_Y0(CNTLZ
):
449 gen_helper_cntlz(tdest
, tsrca
);
452 case OE_RR_X0(CNTTZ
):
453 case OE_RR_Y0(CNTTZ
):
454 gen_helper_cnttz(tdest
, tsrca
);
457 case OE_RR_X0(FSINGLE_PACK1
):
458 case OE_RR_Y0(FSINGLE_PACK1
):
460 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
485 case OE_RR_X1(LDNT1S
):
489 case OE_RR_X1(LDNT1U
):
493 case OE_RR_X1(LDNT2S
):
497 case OE_RR_X1(LDNT2U
):
501 case OE_RR_X1(LDNT4S
):
505 case OE_RR_X1(LDNT4U
):
517 tcg_gen_qemu_ld_tl(tdest
, tsrca
, dc
->mmuidx
, memop
);
520 tcg_gen_andi_tl(tdest
, tsrca
, ~7);
521 tcg_gen_qemu_ld_tl(tdest
, tdest
, dc
->mmuidx
, MO_TEQ
);
527 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
529 tcg_gen_movi_tl(tdest
, dc
->pc
+ TILEGX_BUNDLE_SIZE_IN_BYTES
);
534 gen_helper_pcnt(tdest
, tsrca
);
537 case OE_RR_X0(REVBITS
):
538 case OE_RR_Y0(REVBITS
):
539 gen_helper_revbits(tdest
, tsrca
);
540 mnemonic
= "revbits";
542 case OE_RR_X0(REVBYTES
):
543 case OE_RR_Y0(REVBYTES
):
544 tcg_gen_bswap64_tl(tdest
, tsrca
);
545 mnemonic
= "revbytes";
547 case OE_RR_X0(TBLIDXB0
):
548 case OE_RR_Y0(TBLIDXB0
):
549 case OE_RR_X0(TBLIDXB1
):
550 case OE_RR_Y0(TBLIDXB1
):
551 case OE_RR_X0(TBLIDXB2
):
552 case OE_RR_Y0(TBLIDXB2
):
553 case OE_RR_X0(TBLIDXB3
):
554 case OE_RR_Y0(TBLIDXB3
):
556 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
559 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "%s %s, %s", mnemonic
,
560 reg_names
[dest
], reg_names
[srca
]);
564 static TileExcp
gen_rrr_opcode(DisasContext
*dc
, unsigned opext
,
565 unsigned dest
, unsigned srca
, unsigned srcb
)
567 TCGv tdest
= dest_gr(dc
, dest
);
568 TCGv tsrca
= load_gr(dc
, srca
);
569 TCGv tsrcb
= load_gr(dc
, srcb
);
571 const char *mnemonic
;
574 case OE_RRR(ADDXSC
, 0, X0
):
575 case OE_RRR(ADDXSC
, 0, X1
):
576 gen_saturate_op(tdest
, tsrca
, tsrcb
, tcg_gen_add_tl
);
579 case OE_RRR(ADDX
, 0, X0
):
580 case OE_RRR(ADDX
, 0, X1
):
581 case OE_RRR(ADDX
, 0, Y0
):
582 case OE_RRR(ADDX
, 0, Y1
):
583 tcg_gen_add_tl(tdest
, tsrca
, tsrcb
);
584 tcg_gen_ext32s_tl(tdest
, tdest
);
587 case OE_RRR(ADD
, 0, X0
):
588 case OE_RRR(ADD
, 0, X1
):
589 case OE_RRR(ADD
, 0, Y0
):
590 case OE_RRR(ADD
, 0, Y1
):
591 tcg_gen_add_tl(tdest
, tsrca
, tsrcb
);
594 case OE_RRR(AND
, 0, X0
):
595 case OE_RRR(AND
, 0, X1
):
596 case OE_RRR(AND
, 5, Y0
):
597 case OE_RRR(AND
, 5, Y1
):
598 tcg_gen_and_tl(tdest
, tsrca
, tsrcb
);
601 case OE_RRR(CMOVEQZ
, 0, X0
):
602 case OE_RRR(CMOVEQZ
, 4, Y0
):
603 tcg_gen_movcond_tl(TCG_COND_EQ
, tdest
, tsrca
, load_zero(dc
),
604 tsrcb
, load_gr(dc
, dest
));
605 mnemonic
= "cmoveqz";
607 case OE_RRR(CMOVNEZ
, 0, X0
):
608 case OE_RRR(CMOVNEZ
, 4, Y0
):
609 tcg_gen_movcond_tl(TCG_COND_NE
, tdest
, tsrca
, load_zero(dc
),
610 tsrcb
, load_gr(dc
, dest
));
611 mnemonic
= "cmovnez";
613 case OE_RRR(CMPEQ
, 0, X0
):
614 case OE_RRR(CMPEQ
, 0, X1
):
615 case OE_RRR(CMPEQ
, 3, Y0
):
616 case OE_RRR(CMPEQ
, 3, Y1
):
617 tcg_gen_setcond_tl(TCG_COND_EQ
, tdest
, tsrca
, tsrcb
);
620 case OE_RRR(CMPEXCH4
, 0, X1
):
621 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
622 TILEGX_EXCP_OPCODE_CMPEXCH4
);
623 mnemonic
= "cmpexch4";
625 case OE_RRR(CMPEXCH
, 0, X1
):
626 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
627 TILEGX_EXCP_OPCODE_CMPEXCH
);
628 mnemonic
= "cmpexch";
630 case OE_RRR(CMPLES
, 0, X0
):
631 case OE_RRR(CMPLES
, 0, X1
):
632 case OE_RRR(CMPLES
, 2, Y0
):
633 case OE_RRR(CMPLES
, 2, Y1
):
634 tcg_gen_setcond_tl(TCG_COND_LE
, tdest
, tsrca
, tsrcb
);
637 case OE_RRR(CMPLEU
, 0, X0
):
638 case OE_RRR(CMPLEU
, 0, X1
):
639 case OE_RRR(CMPLEU
, 2, Y0
):
640 case OE_RRR(CMPLEU
, 2, Y1
):
641 tcg_gen_setcond_tl(TCG_COND_LEU
, tdest
, tsrca
, tsrcb
);
644 case OE_RRR(CMPLTS
, 0, X0
):
645 case OE_RRR(CMPLTS
, 0, X1
):
646 case OE_RRR(CMPLTS
, 2, Y0
):
647 case OE_RRR(CMPLTS
, 2, Y1
):
648 tcg_gen_setcond_tl(TCG_COND_LT
, tdest
, tsrca
, tsrcb
);
651 case OE_RRR(CMPLTU
, 0, X0
):
652 case OE_RRR(CMPLTU
, 0, X1
):
653 case OE_RRR(CMPLTU
, 2, Y0
):
654 case OE_RRR(CMPLTU
, 2, Y1
):
655 tcg_gen_setcond_tl(TCG_COND_LTU
, tdest
, tsrca
, tsrcb
);
658 case OE_RRR(CMPNE
, 0, X0
):
659 case OE_RRR(CMPNE
, 0, X1
):
660 case OE_RRR(CMPNE
, 3, Y0
):
661 case OE_RRR(CMPNE
, 3, Y1
):
662 tcg_gen_setcond_tl(TCG_COND_NE
, tdest
, tsrca
, tsrcb
);
665 case OE_RRR(CMULAF
, 0, X0
):
666 case OE_RRR(CMULA
, 0, X0
):
667 case OE_RRR(CMULFR
, 0, X0
):
668 case OE_RRR(CMULF
, 0, X0
):
669 case OE_RRR(CMULHR
, 0, X0
):
670 case OE_RRR(CMULH
, 0, X0
):
671 case OE_RRR(CMUL
, 0, X0
):
672 case OE_RRR(CRC32_32
, 0, X0
):
673 case OE_RRR(CRC32_8
, 0, X0
):
674 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
675 case OE_RRR(DBLALIGN2
, 0, X0
):
676 case OE_RRR(DBLALIGN2
, 0, X1
):
677 gen_dblaligni(tdest
, tsrca
, tsrcb
, 16);
678 mnemonic
= "dblalign2";
680 case OE_RRR(DBLALIGN4
, 0, X0
):
681 case OE_RRR(DBLALIGN4
, 0, X1
):
682 gen_dblaligni(tdest
, tsrca
, tsrcb
, 32);
683 mnemonic
= "dblalign4";
685 case OE_RRR(DBLALIGN6
, 0, X0
):
686 case OE_RRR(DBLALIGN6
, 0, X1
):
687 gen_dblaligni(tdest
, tsrca
, tsrcb
, 48);
688 mnemonic
= "dblalign6";
690 case OE_RRR(DBLALIGN
, 0, X0
):
691 gen_dblalign(tdest
, load_gr(dc
, dest
), tsrca
, tsrcb
);
692 mnemonic
= "dblalign";
694 case OE_RRR(EXCH4
, 0, X1
):
695 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
696 TILEGX_EXCP_OPCODE_EXCH4
);
699 case OE_RRR(EXCH
, 0, X1
):
700 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
701 TILEGX_EXCP_OPCODE_EXCH
);
704 case OE_RRR(FDOUBLE_ADDSUB
, 0, X0
):
705 case OE_RRR(FDOUBLE_ADD_FLAGS
, 0, X0
):
706 case OE_RRR(FDOUBLE_MUL_FLAGS
, 0, X0
):
707 case OE_RRR(FDOUBLE_PACK1
, 0, X0
):
708 case OE_RRR(FDOUBLE_PACK2
, 0, X0
):
709 case OE_RRR(FDOUBLE_SUB_FLAGS
, 0, X0
):
710 case OE_RRR(FDOUBLE_UNPACK_MAX
, 0, X0
):
711 case OE_RRR(FDOUBLE_UNPACK_MIN
, 0, X0
):
712 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
713 case OE_RRR(FETCHADD4
, 0, X1
):
714 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
715 TILEGX_EXCP_OPCODE_FETCHADD4
);
716 mnemonic
= "fetchadd4";
718 case OE_RRR(FETCHADDGEZ4
, 0, X1
):
719 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
720 TILEGX_EXCP_OPCODE_FETCHADDGEZ4
);
721 mnemonic
= "fetchaddgez4";
723 case OE_RRR(FETCHADDGEZ
, 0, X1
):
724 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
725 TILEGX_EXCP_OPCODE_FETCHADDGEZ
);
726 mnemonic
= "fetchaddgez";
728 case OE_RRR(FETCHADD
, 0, X1
):
729 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
730 TILEGX_EXCP_OPCODE_FETCHADD
);
731 mnemonic
= "fetchadd";
733 case OE_RRR(FETCHAND4
, 0, X1
):
734 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
735 TILEGX_EXCP_OPCODE_FETCHAND4
);
736 mnemonic
= "fetchand4";
738 case OE_RRR(FETCHAND
, 0, X1
):
739 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
740 TILEGX_EXCP_OPCODE_FETCHAND
);
741 mnemonic
= "fetchand";
743 case OE_RRR(FETCHOR4
, 0, X1
):
744 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
745 TILEGX_EXCP_OPCODE_FETCHOR4
);
746 mnemonic
= "fetchor4";
748 case OE_RRR(FETCHOR
, 0, X1
):
749 gen_atomic_excp(dc
, dest
, tdest
, tsrca
, tsrcb
,
750 TILEGX_EXCP_OPCODE_FETCHOR
);
751 mnemonic
= "fetchor";
753 case OE_RRR(FSINGLE_ADD1
, 0, X0
):
754 case OE_RRR(FSINGLE_ADDSUB2
, 0, X0
):
755 case OE_RRR(FSINGLE_MUL1
, 0, X0
):
756 case OE_RRR(FSINGLE_MUL2
, 0, X0
):
757 case OE_RRR(FSINGLE_PACK2
, 0, X0
):
758 case OE_RRR(FSINGLE_SUB1
, 0, X0
):
759 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
760 case OE_RRR(MNZ
, 0, X0
):
761 case OE_RRR(MNZ
, 0, X1
):
762 case OE_RRR(MNZ
, 4, Y0
):
763 case OE_RRR(MNZ
, 4, Y1
):
765 tcg_gen_movcond_tl(TCG_COND_NE
, tdest
, tsrca
, t0
, tsrcb
, t0
);
768 case OE_RRR(MULAX
, 0, X0
):
769 case OE_RRR(MULAX
, 3, Y0
):
770 tcg_gen_mul_tl(tdest
, tsrca
, tsrcb
);
771 tcg_gen_add_tl(tdest
, tdest
, load_gr(dc
, dest
));
772 tcg_gen_ext32s_tl(tdest
, tdest
);
775 case OE_RRR(MULA_HS_HS
, 0, X0
):
776 case OE_RRR(MULA_HS_HS
, 9, Y0
):
777 gen_mul_half(tdest
, tsrca
, tsrcb
, HS
, HS
);
778 tcg_gen_add_tl(tdest
, tdest
, load_gr(dc
, dest
));
779 mnemonic
= "mula_hs_hs";
781 case OE_RRR(MULA_HS_HU
, 0, X0
):
782 gen_mul_half(tdest
, tsrca
, tsrcb
, HS
, HU
);
783 tcg_gen_add_tl(tdest
, tdest
, load_gr(dc
, dest
));
784 mnemonic
= "mula_hs_hu";
786 case OE_RRR(MULA_HS_LS
, 0, X0
):
787 gen_mul_half(tdest
, tsrca
, tsrcb
, HS
, LS
);
788 tcg_gen_add_tl(tdest
, tdest
, load_gr(dc
, dest
));
789 mnemonic
= "mula_hs_ls";
791 case OE_RRR(MULA_HS_LU
, 0, X0
):
792 gen_mul_half(tdest
, tsrca
, tsrcb
, HS
, LU
);
793 tcg_gen_add_tl(tdest
, tdest
, load_gr(dc
, dest
));
794 mnemonic
= "mula_hs_lu";
796 case OE_RRR(MULA_HU_HU
, 0, X0
):
797 case OE_RRR(MULA_HU_HU
, 9, Y0
):
798 gen_mul_half(tdest
, tsrca
, tsrcb
, HU
, HU
);
799 tcg_gen_add_tl(tdest
, tdest
, load_gr(dc
, dest
));
800 mnemonic
= "mula_hu_hu";
802 case OE_RRR(MULA_HU_LS
, 0, X0
):
803 gen_mul_half(tdest
, tsrca
, tsrcb
, HU
, LS
);
804 tcg_gen_add_tl(tdest
, tdest
, load_gr(dc
, dest
));
805 mnemonic
= "mula_hu_ls";
807 case OE_RRR(MULA_HU_LU
, 0, X0
):
808 gen_mul_half(tdest
, tsrca
, tsrcb
, HU
, LU
);
809 tcg_gen_add_tl(tdest
, tdest
, load_gr(dc
, dest
));
810 mnemonic
= "mula_hu_lu";
812 case OE_RRR(MULA_LS_LS
, 0, X0
):
813 case OE_RRR(MULA_LS_LS
, 9, Y0
):
814 gen_mul_half(tdest
, tsrca
, tsrcb
, LS
, LS
);
815 tcg_gen_add_tl(tdest
, tdest
, load_gr(dc
, dest
));
816 mnemonic
= "mula_ls_ls";
818 case OE_RRR(MULA_LS_LU
, 0, X0
):
819 gen_mul_half(tdest
, tsrca
, tsrcb
, LS
, LU
);
820 tcg_gen_add_tl(tdest
, tdest
, load_gr(dc
, dest
));
821 mnemonic
= "mula_ls_lu";
823 case OE_RRR(MULA_LU_LU
, 0, X0
):
824 case OE_RRR(MULA_LU_LU
, 9, Y0
):
825 gen_mul_half(tdest
, tsrca
, tsrcb
, LU
, LU
);
826 tcg_gen_add_tl(tdest
, tdest
, load_gr(dc
, dest
));
827 mnemonic
= "mula_lu_lu";
829 case OE_RRR(MULX
, 0, X0
):
830 case OE_RRR(MULX
, 3, Y0
):
831 tcg_gen_mul_tl(tdest
, tsrca
, tsrcb
);
832 tcg_gen_ext32s_tl(tdest
, tdest
);
835 case OE_RRR(MUL_HS_HS
, 0, X0
):
836 case OE_RRR(MUL_HS_HS
, 8, Y0
):
837 gen_mul_half(tdest
, tsrca
, tsrcb
, HS
, HS
);
838 mnemonic
= "mul_hs_hs";
840 case OE_RRR(MUL_HS_HU
, 0, X0
):
841 gen_mul_half(tdest
, tsrca
, tsrcb
, HS
, HU
);
842 mnemonic
= "mul_hs_hu";
844 case OE_RRR(MUL_HS_LS
, 0, X0
):
845 gen_mul_half(tdest
, tsrca
, tsrcb
, HS
, LS
);
846 mnemonic
= "mul_hs_ls";
848 case OE_RRR(MUL_HS_LU
, 0, X0
):
849 gen_mul_half(tdest
, tsrca
, tsrcb
, HS
, LU
);
850 mnemonic
= "mul_hs_lu";
852 case OE_RRR(MUL_HU_HU
, 0, X0
):
853 case OE_RRR(MUL_HU_HU
, 8, Y0
):
854 gen_mul_half(tdest
, tsrca
, tsrcb
, HU
, HU
);
855 mnemonic
= "mul_hu_hu";
857 case OE_RRR(MUL_HU_LS
, 0, X0
):
858 gen_mul_half(tdest
, tsrca
, tsrcb
, HU
, LS
);
859 mnemonic
= "mul_hu_ls";
861 case OE_RRR(MUL_HU_LU
, 0, X0
):
862 gen_mul_half(tdest
, tsrca
, tsrcb
, HU
, LU
);
863 mnemonic
= "mul_hu_lu";
865 case OE_RRR(MUL_LS_LS
, 0, X0
):
866 case OE_RRR(MUL_LS_LS
, 8, Y0
):
867 gen_mul_half(tdest
, tsrca
, tsrcb
, LS
, LS
);
868 mnemonic
= "mul_ls_ls";
870 case OE_RRR(MUL_LS_LU
, 0, X0
):
871 gen_mul_half(tdest
, tsrca
, tsrcb
, LS
, LU
);
872 mnemonic
= "mul_ls_lu";
874 case OE_RRR(MUL_LU_LU
, 0, X0
):
875 case OE_RRR(MUL_LU_LU
, 8, Y0
):
876 gen_mul_half(tdest
, tsrca
, tsrcb
, LU
, LU
);
877 mnemonic
= "mul_lu_lu";
879 case OE_RRR(MZ
, 0, X0
):
880 case OE_RRR(MZ
, 0, X1
):
881 case OE_RRR(MZ
, 4, Y0
):
882 case OE_RRR(MZ
, 4, Y1
):
884 tcg_gen_movcond_tl(TCG_COND_EQ
, tdest
, tsrca
, t0
, tsrcb
, t0
);
887 case OE_RRR(NOR
, 0, X0
):
888 case OE_RRR(NOR
, 0, X1
):
889 case OE_RRR(NOR
, 5, Y0
):
890 case OE_RRR(NOR
, 5, Y1
):
891 tcg_gen_nor_tl(tdest
, tsrca
, tsrcb
);
894 case OE_RRR(OR
, 0, X0
):
895 case OE_RRR(OR
, 0, X1
):
896 case OE_RRR(OR
, 5, Y0
):
897 case OE_RRR(OR
, 5, Y1
):
898 tcg_gen_or_tl(tdest
, tsrca
, tsrcb
);
901 case OE_RRR(ROTL
, 0, X0
):
902 case OE_RRR(ROTL
, 0, X1
):
903 case OE_RRR(ROTL
, 6, Y0
):
904 case OE_RRR(ROTL
, 6, Y1
):
905 tcg_gen_andi_tl(tdest
, tsrcb
, 63);
906 tcg_gen_rotl_tl(tdest
, tsrca
, tdest
);
909 case OE_RRR(SHL1ADDX
, 0, X0
):
910 case OE_RRR(SHL1ADDX
, 0, X1
):
911 case OE_RRR(SHL1ADDX
, 7, Y0
):
912 case OE_RRR(SHL1ADDX
, 7, Y1
):
913 tcg_gen_shli_tl(tdest
, tsrca
, 1);
914 tcg_gen_add_tl(tdest
, tdest
, tsrcb
);
915 tcg_gen_ext32s_tl(tdest
, tdest
);
916 mnemonic
= "shl1addx";
918 case OE_RRR(SHL1ADD
, 0, X0
):
919 case OE_RRR(SHL1ADD
, 0, X1
):
920 case OE_RRR(SHL1ADD
, 1, Y0
):
921 case OE_RRR(SHL1ADD
, 1, Y1
):
922 tcg_gen_shli_tl(tdest
, tsrca
, 1);
923 tcg_gen_add_tl(tdest
, tdest
, tsrcb
);
924 mnemonic
= "shl1add";
926 case OE_RRR(SHL2ADDX
, 0, X0
):
927 case OE_RRR(SHL2ADDX
, 0, X1
):
928 case OE_RRR(SHL2ADDX
, 7, Y0
):
929 case OE_RRR(SHL2ADDX
, 7, Y1
):
930 tcg_gen_shli_tl(tdest
, tsrca
, 2);
931 tcg_gen_add_tl(tdest
, tdest
, tsrcb
);
932 tcg_gen_ext32s_tl(tdest
, tdest
);
933 mnemonic
= "shl2addx";
935 case OE_RRR(SHL2ADD
, 0, X0
):
936 case OE_RRR(SHL2ADD
, 0, X1
):
937 case OE_RRR(SHL2ADD
, 1, Y0
):
938 case OE_RRR(SHL2ADD
, 1, Y1
):
939 tcg_gen_shli_tl(tdest
, tsrca
, 2);
940 tcg_gen_add_tl(tdest
, tdest
, tsrcb
);
941 mnemonic
= "shl2add";
943 case OE_RRR(SHL3ADDX
, 0, X0
):
944 case OE_RRR(SHL3ADDX
, 0, X1
):
945 case OE_RRR(SHL3ADDX
, 7, Y0
):
946 case OE_RRR(SHL3ADDX
, 7, Y1
):
947 tcg_gen_shli_tl(tdest
, tsrca
, 3);
948 tcg_gen_add_tl(tdest
, tdest
, tsrcb
);
949 tcg_gen_ext32s_tl(tdest
, tdest
);
950 mnemonic
= "shl3addx";
952 case OE_RRR(SHL3ADD
, 0, X0
):
953 case OE_RRR(SHL3ADD
, 0, X1
):
954 case OE_RRR(SHL3ADD
, 1, Y0
):
955 case OE_RRR(SHL3ADD
, 1, Y1
):
956 tcg_gen_shli_tl(tdest
, tsrca
, 3);
957 tcg_gen_add_tl(tdest
, tdest
, tsrcb
);
958 mnemonic
= "shl3add";
960 case OE_RRR(SHLX
, 0, X0
):
961 case OE_RRR(SHLX
, 0, X1
):
962 tcg_gen_andi_tl(tdest
, tsrcb
, 31);
963 tcg_gen_shl_tl(tdest
, tsrca
, tdest
);
964 tcg_gen_ext32s_tl(tdest
, tdest
);
967 case OE_RRR(SHL
, 0, X0
):
968 case OE_RRR(SHL
, 0, X1
):
969 case OE_RRR(SHL
, 6, Y0
):
970 case OE_RRR(SHL
, 6, Y1
):
971 tcg_gen_andi_tl(tdest
, tsrcb
, 63);
972 tcg_gen_shl_tl(tdest
, tsrca
, tdest
);
975 case OE_RRR(SHRS
, 0, X0
):
976 case OE_RRR(SHRS
, 0, X1
):
977 case OE_RRR(SHRS
, 6, Y0
):
978 case OE_RRR(SHRS
, 6, Y1
):
979 tcg_gen_andi_tl(tdest
, tsrcb
, 63);
980 tcg_gen_sar_tl(tdest
, tsrca
, tdest
);
983 case OE_RRR(SHRUX
, 0, X0
):
984 case OE_RRR(SHRUX
, 0, X1
):
986 tcg_gen_andi_tl(t0
, tsrcb
, 31);
987 tcg_gen_ext32u_tl(tdest
, tsrca
);
988 tcg_gen_shr_tl(tdest
, tdest
, t0
);
989 tcg_gen_ext32s_tl(tdest
, tdest
);
993 case OE_RRR(SHRU
, 0, X0
):
994 case OE_RRR(SHRU
, 0, X1
):
995 case OE_RRR(SHRU
, 6, Y0
):
996 case OE_RRR(SHRU
, 6, Y1
):
997 tcg_gen_andi_tl(tdest
, tsrcb
, 63);
998 tcg_gen_shr_tl(tdest
, tsrca
, tdest
);
1001 case OE_RRR(SHUFFLEBYTES
, 0, X0
):
1002 gen_helper_shufflebytes(tdest
, load_gr(dc
, dest
), tsrca
, tsrca
);
1003 mnemonic
= "shufflebytes";
1005 case OE_RRR(SUBXSC
, 0, X0
):
1006 case OE_RRR(SUBXSC
, 0, X1
):
1007 gen_saturate_op(tdest
, tsrca
, tsrcb
, tcg_gen_sub_tl
);
1008 mnemonic
= "subxsc";
1010 case OE_RRR(SUBX
, 0, X0
):
1011 case OE_RRR(SUBX
, 0, X1
):
1012 case OE_RRR(SUBX
, 0, Y0
):
1013 case OE_RRR(SUBX
, 0, Y1
):
1014 tcg_gen_sub_tl(tdest
, tsrca
, tsrcb
);
1015 tcg_gen_ext32s_tl(tdest
, tdest
);
1018 case OE_RRR(SUB
, 0, X0
):
1019 case OE_RRR(SUB
, 0, X1
):
1020 case OE_RRR(SUB
, 0, Y0
):
1021 case OE_RRR(SUB
, 0, Y1
):
1022 tcg_gen_sub_tl(tdest
, tsrca
, tsrcb
);
1025 case OE_RRR(V1ADDUC
, 0, X0
):
1026 case OE_RRR(V1ADDUC
, 0, X1
):
1027 case OE_RRR(V1ADD
, 0, X0
):
1028 case OE_RRR(V1ADD
, 0, X1
):
1029 case OE_RRR(V1ADIFFU
, 0, X0
):
1030 case OE_RRR(V1AVGU
, 0, X0
):
1031 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1032 case OE_RRR(V1CMPEQ
, 0, X0
):
1033 case OE_RRR(V1CMPEQ
, 0, X1
):
1034 tcg_gen_xor_tl(tdest
, tsrca
, tsrcb
);
1035 gen_v1cmpeq0(tdest
);
1036 mnemonic
= "v1cmpeq";
1038 case OE_RRR(V1CMPLES
, 0, X0
):
1039 case OE_RRR(V1CMPLES
, 0, X1
):
1040 case OE_RRR(V1CMPLEU
, 0, X0
):
1041 case OE_RRR(V1CMPLEU
, 0, X1
):
1042 case OE_RRR(V1CMPLTS
, 0, X0
):
1043 case OE_RRR(V1CMPLTS
, 0, X1
):
1044 case OE_RRR(V1CMPLTU
, 0, X0
):
1045 case OE_RRR(V1CMPLTU
, 0, X1
):
1046 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1047 case OE_RRR(V1CMPNE
, 0, X0
):
1048 case OE_RRR(V1CMPNE
, 0, X1
):
1049 tcg_gen_xor_tl(tdest
, tsrca
, tsrcb
);
1050 gen_v1cmpne0(tdest
);
1051 mnemonic
= "v1cmpne";
1053 case OE_RRR(V1DDOTPUA
, 0, X0
):
1054 case OE_RRR(V1DDOTPUSA
, 0, X0
):
1055 case OE_RRR(V1DDOTPUS
, 0, X0
):
1056 case OE_RRR(V1DDOTPU
, 0, X0
):
1057 case OE_RRR(V1DOTPA
, 0, X0
):
1058 case OE_RRR(V1DOTPUA
, 0, X0
):
1059 case OE_RRR(V1DOTPUSA
, 0, X0
):
1060 case OE_RRR(V1DOTPUS
, 0, X0
):
1061 case OE_RRR(V1DOTPU
, 0, X0
):
1062 case OE_RRR(V1DOTP
, 0, X0
):
1063 case OE_RRR(V1INT_H
, 0, X0
):
1064 case OE_RRR(V1INT_H
, 0, X1
):
1065 case OE_RRR(V1INT_L
, 0, X0
):
1066 case OE_RRR(V1INT_L
, 0, X1
):
1067 case OE_RRR(V1MAXU
, 0, X0
):
1068 case OE_RRR(V1MAXU
, 0, X1
):
1069 case OE_RRR(V1MINU
, 0, X0
):
1070 case OE_RRR(V1MINU
, 0, X1
):
1071 case OE_RRR(V1MNZ
, 0, X0
):
1072 case OE_RRR(V1MNZ
, 0, X1
):
1073 case OE_RRR(V1MULTU
, 0, X0
):
1074 case OE_RRR(V1MULUS
, 0, X0
):
1075 case OE_RRR(V1MULU
, 0, X0
):
1076 case OE_RRR(V1MZ
, 0, X0
):
1077 case OE_RRR(V1MZ
, 0, X1
):
1078 case OE_RRR(V1SADAU
, 0, X0
):
1079 case OE_RRR(V1SADU
, 0, X0
):
1080 case OE_RRR(V1SHL
, 0, X0
):
1081 case OE_RRR(V1SHL
, 0, X1
):
1082 case OE_RRR(V1SHRS
, 0, X0
):
1083 case OE_RRR(V1SHRS
, 0, X1
):
1084 case OE_RRR(V1SHRU
, 0, X0
):
1085 case OE_RRR(V1SHRU
, 0, X1
):
1086 case OE_RRR(V1SUBUC
, 0, X0
):
1087 case OE_RRR(V1SUBUC
, 0, X1
):
1088 case OE_RRR(V1SUB
, 0, X0
):
1089 case OE_RRR(V1SUB
, 0, X1
):
1090 case OE_RRR(V2ADDSC
, 0, X0
):
1091 case OE_RRR(V2ADDSC
, 0, X1
):
1092 case OE_RRR(V2ADD
, 0, X0
):
1093 case OE_RRR(V2ADD
, 0, X1
):
1094 case OE_RRR(V2ADIFFS
, 0, X0
):
1095 case OE_RRR(V2AVGS
, 0, X0
):
1096 case OE_RRR(V2CMPEQ
, 0, X0
):
1097 case OE_RRR(V2CMPEQ
, 0, X1
):
1098 case OE_RRR(V2CMPLES
, 0, X0
):
1099 case OE_RRR(V2CMPLES
, 0, X1
):
1100 case OE_RRR(V2CMPLEU
, 0, X0
):
1101 case OE_RRR(V2CMPLEU
, 0, X1
):
1102 case OE_RRR(V2CMPLTS
, 0, X0
):
1103 case OE_RRR(V2CMPLTS
, 0, X1
):
1104 case OE_RRR(V2CMPLTU
, 0, X0
):
1105 case OE_RRR(V2CMPLTU
, 0, X1
):
1106 case OE_RRR(V2CMPNE
, 0, X0
):
1107 case OE_RRR(V2CMPNE
, 0, X1
):
1108 case OE_RRR(V2DOTPA
, 0, X0
):
1109 case OE_RRR(V2DOTP
, 0, X0
):
1110 case OE_RRR(V2INT_H
, 0, X0
):
1111 case OE_RRR(V2INT_H
, 0, X1
):
1112 case OE_RRR(V2INT_L
, 0, X0
):
1113 case OE_RRR(V2INT_L
, 0, X1
):
1114 case OE_RRR(V2MAXS
, 0, X0
):
1115 case OE_RRR(V2MAXS
, 0, X1
):
1116 case OE_RRR(V2MINS
, 0, X0
):
1117 case OE_RRR(V2MINS
, 0, X1
):
1118 case OE_RRR(V2MNZ
, 0, X0
):
1119 case OE_RRR(V2MNZ
, 0, X1
):
1120 case OE_RRR(V2MULFSC
, 0, X0
):
1121 case OE_RRR(V2MULS
, 0, X0
):
1122 case OE_RRR(V2MULTS
, 0, X0
):
1123 case OE_RRR(V2MZ
, 0, X0
):
1124 case OE_RRR(V2MZ
, 0, X1
):
1125 case OE_RRR(V2PACKH
, 0, X0
):
1126 case OE_RRR(V2PACKH
, 0, X1
):
1127 case OE_RRR(V2PACKL
, 0, X0
):
1128 case OE_RRR(V2PACKL
, 0, X1
):
1129 case OE_RRR(V2PACKUC
, 0, X0
):
1130 case OE_RRR(V2PACKUC
, 0, X1
):
1131 case OE_RRR(V2SADAS
, 0, X0
):
1132 case OE_RRR(V2SADAU
, 0, X0
):
1133 case OE_RRR(V2SADS
, 0, X0
):
1134 case OE_RRR(V2SADU
, 0, X0
):
1135 case OE_RRR(V2SHLSC
, 0, X0
):
1136 case OE_RRR(V2SHLSC
, 0, X1
):
1137 case OE_RRR(V2SHL
, 0, X0
):
1138 case OE_RRR(V2SHL
, 0, X1
):
1139 case OE_RRR(V2SHRS
, 0, X0
):
1140 case OE_RRR(V2SHRS
, 0, X1
):
1141 case OE_RRR(V2SHRU
, 0, X0
):
1142 case OE_RRR(V2SHRU
, 0, X1
):
1143 case OE_RRR(V2SUBSC
, 0, X0
):
1144 case OE_RRR(V2SUBSC
, 0, X1
):
1145 case OE_RRR(V2SUB
, 0, X0
):
1146 case OE_RRR(V2SUB
, 0, X1
):
1147 case OE_RRR(V4ADDSC
, 0, X0
):
1148 case OE_RRR(V4ADDSC
, 0, X1
):
1149 case OE_RRR(V4ADD
, 0, X0
):
1150 case OE_RRR(V4ADD
, 0, X1
):
1151 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1152 case OE_RRR(V4INT_H
, 0, X0
):
1153 case OE_RRR(V4INT_H
, 0, X1
):
1154 tcg_gen_shri_tl(tdest
, tsrcb
, 32);
1155 tcg_gen_deposit_tl(tdest
, tsrca
, tdest
, 0, 32);
1156 mnemonic
= "v4int_h";
1158 case OE_RRR(V4INT_L
, 0, X0
):
1159 case OE_RRR(V4INT_L
, 0, X1
):
1160 tcg_gen_deposit_tl(tdest
, tsrcb
, tsrca
, 32, 32);
1161 mnemonic
= "v4int_l";
1163 case OE_RRR(V4PACKSC
, 0, X0
):
1164 case OE_RRR(V4PACKSC
, 0, X1
):
1165 case OE_RRR(V4SHLSC
, 0, X0
):
1166 case OE_RRR(V4SHLSC
, 0, X1
):
1167 case OE_RRR(V4SHL
, 0, X0
):
1168 case OE_RRR(V4SHL
, 0, X1
):
1169 case OE_RRR(V4SHRS
, 0, X0
):
1170 case OE_RRR(V4SHRS
, 0, X1
):
1171 case OE_RRR(V4SHRU
, 0, X0
):
1172 case OE_RRR(V4SHRU
, 0, X1
):
1173 case OE_RRR(V4SUBSC
, 0, X0
):
1174 case OE_RRR(V4SUBSC
, 0, X1
):
1175 case OE_RRR(V4SUB
, 0, X0
):
1176 case OE_RRR(V4SUB
, 0, X1
):
1177 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1178 case OE_RRR(XOR
, 0, X0
):
1179 case OE_RRR(XOR
, 0, X1
):
1180 case OE_RRR(XOR
, 5, Y0
):
1181 case OE_RRR(XOR
, 5, Y1
):
1182 tcg_gen_xor_tl(tdest
, tsrca
, tsrcb
);
1186 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1189 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "%s %s, %s, %s", mnemonic
,
1190 reg_names
[dest
], reg_names
[srca
], reg_names
[srcb
]);
1191 return TILEGX_EXCP_NONE
;
1194 static TileExcp
gen_rri_opcode(DisasContext
*dc
, unsigned opext
,
1195 unsigned dest
, unsigned srca
, int imm
)
1197 TCGv tdest
= dest_gr(dc
, dest
);
1198 TCGv tsrca
= load_gr(dc
, srca
);
1199 const char *mnemonic
;
1203 case OE(ADDI_OPCODE_Y0
, 0, Y0
):
1204 case OE(ADDI_OPCODE_Y1
, 0, Y1
):
1205 case OE_IM(ADDI
, X0
):
1206 case OE_IM(ADDI
, X1
):
1207 tcg_gen_addi_tl(tdest
, tsrca
, imm
);
1210 case OE(ADDXI_OPCODE_Y0
, 0, Y0
):
1211 case OE(ADDXI_OPCODE_Y1
, 0, Y1
):
1212 case OE_IM(ADDXI
, X0
):
1213 case OE_IM(ADDXI
, X1
):
1214 tcg_gen_addi_tl(tdest
, tsrca
, imm
);
1215 tcg_gen_ext32s_tl(tdest
, tdest
);
1218 case OE(ANDI_OPCODE_Y0
, 0, Y0
):
1219 case OE(ANDI_OPCODE_Y1
, 0, Y1
):
1220 case OE_IM(ANDI
, X0
):
1221 case OE_IM(ANDI
, X1
):
1222 tcg_gen_andi_tl(tdest
, tsrca
, imm
);
1225 case OE(CMPEQI_OPCODE_Y0
, 0, Y0
):
1226 case OE(CMPEQI_OPCODE_Y1
, 0, Y1
):
1227 case OE_IM(CMPEQI
, X0
):
1228 case OE_IM(CMPEQI
, X1
):
1229 tcg_gen_setcondi_tl(TCG_COND_EQ
, tdest
, tsrca
, imm
);
1230 mnemonic
= "cmpeqi";
1232 case OE(CMPLTSI_OPCODE_Y0
, 0, Y0
):
1233 case OE(CMPLTSI_OPCODE_Y1
, 0, Y1
):
1234 case OE_IM(CMPLTSI
, X0
):
1235 case OE_IM(CMPLTSI
, X1
):
1236 tcg_gen_setcondi_tl(TCG_COND_LT
, tdest
, tsrca
, imm
);
1237 mnemonic
= "cmpltsi";
1239 case OE_IM(CMPLTUI
, X0
):
1240 case OE_IM(CMPLTUI
, X1
):
1241 tcg_gen_setcondi_tl(TCG_COND_LTU
, tdest
, tsrca
, imm
);
1242 mnemonic
= "cmpltui";
1244 case OE_IM(LD1S_ADD
, X1
):
1246 mnemonic
= "ld1s_add";
1248 case OE_IM(LD1U_ADD
, X1
):
1250 mnemonic
= "ld1u_add";
1252 case OE_IM(LD2S_ADD
, X1
):
1254 mnemonic
= "ld2s_add";
1256 case OE_IM(LD2U_ADD
, X1
):
1258 mnemonic
= "ld2u_add";
1260 case OE_IM(LD4S_ADD
, X1
):
1262 mnemonic
= "ld4s_add";
1264 case OE_IM(LD4U_ADD
, X1
):
1266 mnemonic
= "ld4u_add";
1268 case OE_IM(LDNT1S_ADD
, X1
):
1270 mnemonic
= "ldnt1s_add";
1272 case OE_IM(LDNT1U_ADD
, X1
):
1274 mnemonic
= "ldnt1u_add";
1276 case OE_IM(LDNT2S_ADD
, X1
):
1278 mnemonic
= "ldnt2s_add";
1280 case OE_IM(LDNT2U_ADD
, X1
):
1282 mnemonic
= "ldnt2u_add";
1284 case OE_IM(LDNT4S_ADD
, X1
):
1286 mnemonic
= "ldnt4s_add";
1288 case OE_IM(LDNT4U_ADD
, X1
):
1290 mnemonic
= "ldnt4u_add";
1292 case OE_IM(LDNT_ADD
, X1
):
1294 mnemonic
= "ldnt_add";
1296 case OE_IM(LD_ADD
, X1
):
1298 mnemonic
= "ldnt_add";
1300 tcg_gen_qemu_ld_tl(tdest
, tsrca
, dc
->mmuidx
, memop
);
1301 tcg_gen_addi_tl(dest_gr(dc
, srca
), tsrca
, imm
);
1303 case OE_IM(LDNA_ADD
, X1
):
1304 tcg_gen_andi_tl(tdest
, tsrca
, ~7);
1305 tcg_gen_qemu_ld_tl(tdest
, tdest
, dc
->mmuidx
, MO_TEQ
);
1306 tcg_gen_addi_tl(dest_gr(dc
, srca
), tsrca
, imm
);
1307 mnemonic
= "ldna_add";
1309 case OE_IM(ORI
, X0
):
1310 case OE_IM(ORI
, X1
):
1311 tcg_gen_ori_tl(tdest
, tsrca
, imm
);
1314 case OE_IM(V1ADDI
, X0
):
1315 case OE_IM(V1ADDI
, X1
):
1316 case OE_IM(V1CMPEQI
, X0
):
1317 case OE_IM(V1CMPEQI
, X1
):
1318 tcg_gen_xori_tl(tdest
, tsrca
, V1_IMM(imm
));
1319 gen_v1cmpeq0(tdest
);
1320 mnemonic
= "v1cmpeqi";
1322 case OE_IM(V1CMPLTSI
, X0
):
1323 case OE_IM(V1CMPLTSI
, X1
):
1324 case OE_IM(V1CMPLTUI
, X0
):
1325 case OE_IM(V1CMPLTUI
, X1
):
1326 case OE_IM(V1MAXUI
, X0
):
1327 case OE_IM(V1MAXUI
, X1
):
1328 case OE_IM(V1MINUI
, X0
):
1329 case OE_IM(V1MINUI
, X1
):
1330 case OE_IM(V2ADDI
, X0
):
1331 case OE_IM(V2ADDI
, X1
):
1332 case OE_IM(V2CMPEQI
, X0
):
1333 case OE_IM(V2CMPEQI
, X1
):
1334 case OE_IM(V2CMPLTSI
, X0
):
1335 case OE_IM(V2CMPLTSI
, X1
):
1336 case OE_IM(V2CMPLTUI
, X0
):
1337 case OE_IM(V2CMPLTUI
, X1
):
1338 case OE_IM(V2MAXSI
, X0
):
1339 case OE_IM(V2MAXSI
, X1
):
1340 case OE_IM(V2MINSI
, X0
):
1341 case OE_IM(V2MINSI
, X1
):
1342 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1343 case OE_IM(XORI
, X0
):
1344 case OE_IM(XORI
, X1
):
1345 tcg_gen_xori_tl(tdest
, tsrca
, imm
);
1349 case OE_SH(ROTLI
, X0
):
1350 case OE_SH(ROTLI
, X1
):
1351 case OE_SH(ROTLI
, Y0
):
1352 case OE_SH(ROTLI
, Y1
):
1353 tcg_gen_rotli_tl(tdest
, tsrca
, imm
);
1356 case OE_SH(SHLI
, X0
):
1357 case OE_SH(SHLI
, X1
):
1358 case OE_SH(SHLI
, Y0
):
1359 case OE_SH(SHLI
, Y1
):
1360 tcg_gen_shli_tl(tdest
, tsrca
, imm
);
1363 case OE_SH(SHLXI
, X0
):
1364 case OE_SH(SHLXI
, X1
):
1365 tcg_gen_shli_tl(tdest
, tsrca
, imm
& 31);
1366 tcg_gen_ext32s_tl(tdest
, tdest
);
1369 case OE_SH(SHRSI
, X0
):
1370 case OE_SH(SHRSI
, X1
):
1371 case OE_SH(SHRSI
, Y0
):
1372 case OE_SH(SHRSI
, Y1
):
1373 tcg_gen_sari_tl(tdest
, tsrca
, imm
);
1376 case OE_SH(SHRUI
, X0
):
1377 case OE_SH(SHRUI
, X1
):
1378 case OE_SH(SHRUI
, Y0
):
1379 case OE_SH(SHRUI
, Y1
):
1380 tcg_gen_shri_tl(tdest
, tsrca
, imm
);
1383 case OE_SH(SHRUXI
, X0
):
1384 case OE_SH(SHRUXI
, X1
):
1385 if ((imm
& 31) == 0) {
1386 tcg_gen_ext32s_tl(tdest
, tsrca
);
1388 tcg_gen_ext32u_tl(tdest
, tsrca
);
1389 tcg_gen_shri_tl(tdest
, tdest
, imm
& 31);
1393 case OE_SH(V1SHLI
, X0
):
1394 case OE_SH(V1SHLI
, X1
):
1395 case OE_SH(V1SHRSI
, X0
):
1396 case OE_SH(V1SHRSI
, X1
):
1397 case OE_SH(V1SHRUI
, X0
):
1398 case OE_SH(V1SHRUI
, X1
):
1399 case OE_SH(V2SHLI
, X0
):
1400 case OE_SH(V2SHLI
, X1
):
1401 case OE_SH(V2SHRSI
, X0
):
1402 case OE_SH(V2SHRSI
, X1
):
1403 case OE_SH(V2SHRUI
, X0
):
1404 case OE_SH(V2SHRUI
, X1
):
1405 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1407 case OE(ADDLI_OPCODE_X0
, 0, X0
):
1408 case OE(ADDLI_OPCODE_X1
, 0, X1
):
1409 tcg_gen_addi_tl(tdest
, tsrca
, imm
);
1412 case OE(ADDXLI_OPCODE_X0
, 0, X0
):
1413 case OE(ADDXLI_OPCODE_X1
, 0, X1
):
1414 tcg_gen_addi_tl(tdest
, tsrca
, imm
);
1415 tcg_gen_ext32s_tl(tdest
, tdest
);
1416 mnemonic
= "addxli";
1418 case OE(SHL16INSLI_OPCODE_X0
, 0, X0
):
1419 case OE(SHL16INSLI_OPCODE_X1
, 0, X1
):
1420 tcg_gen_shli_tl(tdest
, tsrca
, 16);
1421 tcg_gen_ori_tl(tdest
, tdest
, imm
& 0xffff);
1422 mnemonic
= "shl16insli";
1426 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1429 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "%s %s, %s, %d", mnemonic
,
1430 reg_names
[dest
], reg_names
[srca
], imm
);
1431 return TILEGX_EXCP_NONE
;
1434 static TileExcp
gen_bf_opcode_x0(DisasContext
*dc
, unsigned ext
,
1435 unsigned dest
, unsigned srca
,
1436 unsigned bfs
, unsigned bfe
)
1438 TCGv tdest
= dest_gr(dc
, dest
);
1439 TCGv tsrca
= load_gr(dc
, srca
);
1442 const char *mnemonic
;
1444 /* The bitfield is either between E and S inclusive,
1445 or up from S and down from E inclusive. */
1447 len
= bfe
- bfs
+ 1;
1449 len
= (64 - bfs
) + (bfe
+ 1);
1453 case BFEXTU_BF_OPCODE_X0
:
1454 if (bfs
== 0 && bfe
== 7) {
1455 tcg_gen_ext8u_tl(tdest
, tsrca
);
1456 } else if (bfs
== 0 && bfe
== 15) {
1457 tcg_gen_ext16u_tl(tdest
, tsrca
);
1458 } else if (bfs
== 0 && bfe
== 31) {
1459 tcg_gen_ext32u_tl(tdest
, tsrca
);
1463 tcg_gen_shli_tl(tdest
, tsrca
, rol
);
1465 tcg_gen_rotli_tl(tdest
, tsrca
, rol
);
1467 tcg_gen_shri_tl(tdest
, tdest
, (bfs
+ rol
) & 63);
1469 mnemonic
= "bfextu";
1472 case BFEXTS_BF_OPCODE_X0
:
1473 if (bfs
== 0 && bfe
== 7) {
1474 tcg_gen_ext8s_tl(tdest
, tsrca
);
1475 } else if (bfs
== 0 && bfe
== 15) {
1476 tcg_gen_ext16s_tl(tdest
, tsrca
);
1477 } else if (bfs
== 0 && bfe
== 31) {
1478 tcg_gen_ext32s_tl(tdest
, tsrca
);
1482 tcg_gen_shli_tl(tdest
, tsrca
, rol
);
1484 tcg_gen_rotli_tl(tdest
, tsrca
, rol
);
1486 tcg_gen_sari_tl(tdest
, tdest
, (bfs
+ rol
) & 63);
1488 mnemonic
= "bfexts";
1491 case BFINS_BF_OPCODE_X0
:
1492 tsrcd
= load_gr(dc
, dest
);
1494 tcg_gen_deposit_tl(tdest
, tsrcd
, tsrca
, bfs
, len
);
1496 tcg_gen_rotri_tl(tdest
, tsrcd
, bfs
);
1497 tcg_gen_deposit_tl(tdest
, tdest
, tsrca
, 0, len
);
1498 tcg_gen_rotli_tl(tdest
, tdest
, bfs
);
1503 case MM_BF_OPCODE_X0
:
1504 tsrcd
= load_gr(dc
, dest
);
1506 tcg_gen_deposit_tl(tdest
, tsrca
, tsrcd
, 0, len
);
1508 uint64_t mask
= len
== 64 ? -1 : rol64((1ULL << len
) - 1, bfs
);
1509 TCGv tmp
= tcg_const_tl(mask
);
1511 tcg_gen_and_tl(tdest
, tsrcd
, tmp
);
1512 tcg_gen_andc_tl(tmp
, tsrca
, tmp
);
1513 tcg_gen_or_tl(tdest
, tdest
, tmp
);
1520 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1523 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "%s %s, %s, %u, %u", mnemonic
,
1524 reg_names
[dest
], reg_names
[srca
], bfs
, bfe
);
1525 return TILEGX_EXCP_NONE
;
1528 static TileExcp
gen_branch_opcode_x1(DisasContext
*dc
, unsigned ext
,
1529 unsigned srca
, int off
)
1531 target_ulong tgt
= dc
->pc
+ off
* TILEGX_BUNDLE_SIZE_IN_BYTES
;
1532 const char *mnemonic
;
1534 dc
->jmp
.dest
= tcg_const_tl(tgt
);
1535 dc
->jmp
.val1
= tcg_temp_new();
1536 tcg_gen_mov_tl(dc
->jmp
.val1
, load_gr(dc
, srca
));
1538 /* Note that the "predict taken" opcodes have bit 0 clear.
1539 Therefore, fold the two cases together by setting bit 0. */
1541 case BEQZ_BRANCH_OPCODE_X1
:
1542 dc
->jmp
.cond
= TCG_COND_EQ
;
1545 case BNEZ_BRANCH_OPCODE_X1
:
1546 dc
->jmp
.cond
= TCG_COND_NE
;
1549 case BGEZ_BRANCH_OPCODE_X1
:
1550 dc
->jmp
.cond
= TCG_COND_GE
;
1553 case BGTZ_BRANCH_OPCODE_X1
:
1554 dc
->jmp
.cond
= TCG_COND_GT
;
1557 case BLEZ_BRANCH_OPCODE_X1
:
1558 dc
->jmp
.cond
= TCG_COND_LE
;
1561 case BLTZ_BRANCH_OPCODE_X1
:
1562 dc
->jmp
.cond
= TCG_COND_LT
;
1565 case BLBC_BRANCH_OPCODE_X1
:
1566 dc
->jmp
.cond
= TCG_COND_EQ
;
1567 tcg_gen_andi_tl(dc
->jmp
.val1
, dc
->jmp
.val1
, 1);
1570 case BLBS_BRANCH_OPCODE_X1
:
1571 dc
->jmp
.cond
= TCG_COND_NE
;
1572 tcg_gen_andi_tl(dc
->jmp
.val1
, dc
->jmp
.val1
, 1);
1576 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1579 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1580 qemu_log("%s%s %s, " TARGET_FMT_lx
" <%s>",
1581 mnemonic
, ext
& 1 ? "" : "t",
1582 reg_names
[srca
], tgt
, lookup_symbol(tgt
));
1584 return TILEGX_EXCP_NONE
;
1587 static TileExcp
gen_jump_opcode_x1(DisasContext
*dc
, unsigned ext
, int off
)
1589 target_ulong tgt
= dc
->pc
+ off
* TILEGX_BUNDLE_SIZE_IN_BYTES
;
1590 const char *mnemonic
= "j";
1592 /* The extension field is 1 bit, therefore we only have JAL and J. */
1593 if (ext
== JAL_JUMP_OPCODE_X1
) {
1594 tcg_gen_movi_tl(dest_gr(dc
, TILEGX_R_LR
),
1595 dc
->pc
+ TILEGX_BUNDLE_SIZE_IN_BYTES
);
1598 dc
->jmp
.cond
= TCG_COND_ALWAYS
;
1599 dc
->jmp
.dest
= tcg_const_tl(tgt
);
1601 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1602 qemu_log("%s " TARGET_FMT_lx
" <%s>",
1603 mnemonic
, tgt
, lookup_symbol(tgt
));
1605 return TILEGX_EXCP_NONE
;
1611 void (*get
)(TCGv
, TCGv_ptr
);
1612 void (*put
)(TCGv_ptr
, TCGv
);
1615 static const TileSPR
*find_spr(unsigned spr
)
1617 /* Allow the compiler to construct the binary search tree. */
1618 #define D(N, O, G, P) \
1619 case SPR_##N: { static const TileSPR x = { #N, O, G, P }; return &x; }
1623 offsetof(CPUTLGState
, spregs
[TILEGX_SPR_CMPEXCH
]), 0, 0)
1624 D(INTERRUPT_CRITICAL_SECTION
,
1625 offsetof(CPUTLGState
, spregs
[TILEGX_SPR_CRITICAL_SEC
]), 0, 0)
1627 offsetof(CPUTLGState
, spregs
[TILEGX_SPR_SIM_CONTROL
]), 0, 0)
1632 qemu_log_mask(LOG_UNIMP
, "UNIMP SPR %u\n", spr
);
1636 static TileExcp
gen_mtspr_x1(DisasContext
*dc
, unsigned spr
, unsigned srca
)
1638 const TileSPR
*def
= find_spr(spr
);
1642 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "mtspr spr[%u], %s", spr
, reg_names
[srca
]);
1643 return TILEGX_EXCP_OPCODE_UNKNOWN
;
1646 tsrca
= load_gr(dc
, srca
);
1648 def
->put(cpu_env
, tsrca
);
1650 tcg_gen_st_tl(tsrca
, cpu_env
, def
->offset
);
1652 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "mtspr %s, %s", def
->name
, reg_names
[srca
]);
1653 return TILEGX_EXCP_NONE
;
1656 static TileExcp
gen_mfspr_x1(DisasContext
*dc
, unsigned dest
, unsigned spr
)
1658 const TileSPR
*def
= find_spr(spr
);
1662 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "mtspr %s, spr[%u]", reg_names
[dest
], spr
);
1663 return TILEGX_EXCP_OPCODE_UNKNOWN
;
1666 tdest
= dest_gr(dc
, dest
);
1668 def
->get(tdest
, cpu_env
);
1670 tcg_gen_ld_tl(tdest
, cpu_env
, def
->offset
);
1672 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "mfspr %s, %s", reg_names
[dest
], def
->name
);
1673 return TILEGX_EXCP_NONE
;
1676 static TileExcp
decode_y0(DisasContext
*dc
, tilegx_bundle_bits bundle
)
1678 unsigned opc
= get_Opcode_Y0(bundle
);
1679 unsigned ext
= get_RRROpcodeExtension_Y0(bundle
);
1680 unsigned dest
= get_Dest_Y0(bundle
);
1681 unsigned srca
= get_SrcA_Y0(bundle
);
1686 case RRR_1_OPCODE_Y0
:
1687 if (ext
== UNARY_RRR_1_OPCODE_Y0
) {
1688 ext
= get_UnaryOpcodeExtension_Y0(bundle
);
1689 return gen_rr_opcode(dc
, OE(opc
, ext
, Y0
), dest
, srca
);
1692 case RRR_0_OPCODE_Y0
:
1693 case RRR_2_OPCODE_Y0
:
1694 case RRR_3_OPCODE_Y0
:
1695 case RRR_4_OPCODE_Y0
:
1696 case RRR_5_OPCODE_Y0
:
1697 case RRR_6_OPCODE_Y0
:
1698 case RRR_7_OPCODE_Y0
:
1699 case RRR_8_OPCODE_Y0
:
1700 case RRR_9_OPCODE_Y0
:
1701 srcb
= get_SrcB_Y0(bundle
);
1702 return gen_rrr_opcode(dc
, OE(opc
, ext
, Y0
), dest
, srca
, srcb
);
1704 case SHIFT_OPCODE_Y0
:
1705 ext
= get_ShiftOpcodeExtension_Y0(bundle
);
1706 imm
= get_ShAmt_Y0(bundle
);
1707 return gen_rri_opcode(dc
, OE(opc
, ext
, Y0
), dest
, srca
, imm
);
1709 case ADDI_OPCODE_Y0
:
1710 case ADDXI_OPCODE_Y0
:
1711 case ANDI_OPCODE_Y0
:
1712 case CMPEQI_OPCODE_Y0
:
1713 case CMPLTSI_OPCODE_Y0
:
1714 imm
= (int8_t)get_Imm8_Y0(bundle
);
1715 return gen_rri_opcode(dc
, OE(opc
, 0, Y0
), dest
, srca
, imm
);
1718 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1722 static TileExcp
decode_y1(DisasContext
*dc
, tilegx_bundle_bits bundle
)
1724 unsigned opc
= get_Opcode_Y1(bundle
);
1725 unsigned ext
= get_RRROpcodeExtension_Y1(bundle
);
1726 unsigned dest
= get_Dest_Y1(bundle
);
1727 unsigned srca
= get_SrcA_Y1(bundle
);
1731 switch (get_Opcode_Y1(bundle
)) {
1732 case RRR_1_OPCODE_Y1
:
1733 if (ext
== UNARY_RRR_1_OPCODE_Y0
) {
1734 ext
= get_UnaryOpcodeExtension_Y1(bundle
);
1735 return gen_rr_opcode(dc
, OE(opc
, ext
, Y1
), dest
, srca
);
1738 case RRR_0_OPCODE_Y1
:
1739 case RRR_2_OPCODE_Y1
:
1740 case RRR_3_OPCODE_Y1
:
1741 case RRR_4_OPCODE_Y1
:
1742 case RRR_5_OPCODE_Y1
:
1743 case RRR_6_OPCODE_Y1
:
1744 case RRR_7_OPCODE_Y1
:
1745 srcb
= get_SrcB_Y1(bundle
);
1746 return gen_rrr_opcode(dc
, OE(opc
, ext
, Y1
), dest
, srca
, srcb
);
1748 case SHIFT_OPCODE_Y1
:
1749 ext
= get_ShiftOpcodeExtension_Y1(bundle
);
1750 imm
= get_ShAmt_Y1(bundle
);
1751 return gen_rri_opcode(dc
, OE(opc
, ext
, Y1
), dest
, srca
, imm
);
1753 case ADDI_OPCODE_Y1
:
1754 case ADDXI_OPCODE_Y1
:
1755 case ANDI_OPCODE_Y1
:
1756 case CMPEQI_OPCODE_Y1
:
1757 case CMPLTSI_OPCODE_Y1
:
1758 imm
= (int8_t)get_Imm8_Y1(bundle
);
1759 return gen_rri_opcode(dc
, OE(opc
, 0, Y1
), dest
, srca
, imm
);
1762 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1766 static TileExcp
decode_y2(DisasContext
*dc
, tilegx_bundle_bits bundle
)
1768 unsigned mode
= get_Mode(bundle
);
1769 unsigned opc
= get_Opcode_Y2(bundle
);
1770 unsigned srca
= get_SrcA_Y2(bundle
);
1771 unsigned srcbdest
= get_SrcBDest_Y2(bundle
);
1772 const char *mnemonic
;
1775 switch (OEY2(opc
, mode
)) {
1776 case OEY2(LD1S_OPCODE_Y2
, MODE_OPCODE_YA2
):
1780 case OEY2(LD1U_OPCODE_Y2
, MODE_OPCODE_YA2
):
1784 case OEY2(LD2S_OPCODE_Y2
, MODE_OPCODE_YA2
):
1788 case OEY2(LD2U_OPCODE_Y2
, MODE_OPCODE_YA2
):
1792 case OEY2(LD4S_OPCODE_Y2
, MODE_OPCODE_YB2
):
1796 case OEY2(LD4U_OPCODE_Y2
, MODE_OPCODE_YB2
):
1800 case OEY2(LD_OPCODE_Y2
, MODE_OPCODE_YB2
):
1804 tcg_gen_qemu_ld_tl(dest_gr(dc
, srcbdest
), load_gr(dc
, srca
),
1806 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "%s %s, %s", mnemonic
,
1807 reg_names
[srcbdest
], reg_names
[srca
]);
1808 return TILEGX_EXCP_NONE
;
1810 case OEY2(ST1_OPCODE_Y2
, MODE_OPCODE_YC2
):
1811 return gen_st_opcode(dc
, 0, srca
, srcbdest
, MO_UB
, "st1");
1812 case OEY2(ST2_OPCODE_Y2
, MODE_OPCODE_YC2
):
1813 return gen_st_opcode(dc
, 0, srca
, srcbdest
, MO_TEUW
, "st2");
1814 case OEY2(ST4_OPCODE_Y2
, MODE_OPCODE_YC2
):
1815 return gen_st_opcode(dc
, 0, srca
, srcbdest
, MO_TEUL
, "st4");
1816 case OEY2(ST_OPCODE_Y2
, MODE_OPCODE_YC2
):
1817 return gen_st_opcode(dc
, 0, srca
, srcbdest
, MO_TEQ
, "st");
1820 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1824 static TileExcp
decode_x0(DisasContext
*dc
, tilegx_bundle_bits bundle
)
1826 unsigned opc
= get_Opcode_X0(bundle
);
1827 unsigned dest
= get_Dest_X0(bundle
);
1828 unsigned srca
= get_SrcA_X0(bundle
);
1829 unsigned ext
, srcb
, bfs
, bfe
;
1833 case RRR_0_OPCODE_X0
:
1834 ext
= get_RRROpcodeExtension_X0(bundle
);
1835 if (ext
== UNARY_RRR_0_OPCODE_X0
) {
1836 ext
= get_UnaryOpcodeExtension_X0(bundle
);
1837 return gen_rr_opcode(dc
, OE(opc
, ext
, X0
), dest
, srca
);
1839 srcb
= get_SrcB_X0(bundle
);
1840 return gen_rrr_opcode(dc
, OE(opc
, ext
, X0
), dest
, srca
, srcb
);
1842 case SHIFT_OPCODE_X0
:
1843 ext
= get_ShiftOpcodeExtension_X0(bundle
);
1844 imm
= get_ShAmt_X0(bundle
);
1845 return gen_rri_opcode(dc
, OE(opc
, ext
, X0
), dest
, srca
, imm
);
1847 case IMM8_OPCODE_X0
:
1848 ext
= get_Imm8OpcodeExtension_X0(bundle
);
1849 imm
= (int8_t)get_Imm8_X0(bundle
);
1850 return gen_rri_opcode(dc
, OE(opc
, ext
, X0
), dest
, srca
, imm
);
1853 ext
= get_BFOpcodeExtension_X0(bundle
);
1854 bfs
= get_BFStart_X0(bundle
);
1855 bfe
= get_BFEnd_X0(bundle
);
1856 return gen_bf_opcode_x0(dc
, ext
, dest
, srca
, bfs
, bfe
);
1858 case ADDLI_OPCODE_X0
:
1859 case SHL16INSLI_OPCODE_X0
:
1860 case ADDXLI_OPCODE_X0
:
1861 imm
= (int16_t)get_Imm16_X0(bundle
);
1862 return gen_rri_opcode(dc
, OE(opc
, 0, X0
), dest
, srca
, imm
);
1865 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1869 static TileExcp
decode_x1(DisasContext
*dc
, tilegx_bundle_bits bundle
)
1871 unsigned opc
= get_Opcode_X1(bundle
);
1872 unsigned dest
= get_Dest_X1(bundle
);
1873 unsigned srca
= get_SrcA_X1(bundle
);
1878 case RRR_0_OPCODE_X1
:
1879 ext
= get_RRROpcodeExtension_X1(bundle
);
1880 srcb
= get_SrcB_X1(bundle
);
1882 case UNARY_RRR_0_OPCODE_X1
:
1883 ext
= get_UnaryOpcodeExtension_X1(bundle
);
1884 return gen_rr_opcode(dc
, OE(opc
, ext
, X1
), dest
, srca
);
1885 case ST1_RRR_0_OPCODE_X1
:
1886 return gen_st_opcode(dc
, dest
, srca
, srcb
, MO_UB
, "st1");
1887 case ST2_RRR_0_OPCODE_X1
:
1888 return gen_st_opcode(dc
, dest
, srca
, srcb
, MO_TEUW
, "st2");
1889 case ST4_RRR_0_OPCODE_X1
:
1890 return gen_st_opcode(dc
, dest
, srca
, srcb
, MO_TEUL
, "st4");
1891 case STNT1_RRR_0_OPCODE_X1
:
1892 return gen_st_opcode(dc
, dest
, srca
, srcb
, MO_UB
, "stnt1");
1893 case STNT2_RRR_0_OPCODE_X1
:
1894 return gen_st_opcode(dc
, dest
, srca
, srcb
, MO_TEUW
, "stnt2");
1895 case STNT4_RRR_0_OPCODE_X1
:
1896 return gen_st_opcode(dc
, dest
, srca
, srcb
, MO_TEUL
, "stnt4");
1897 case STNT_RRR_0_OPCODE_X1
:
1898 return gen_st_opcode(dc
, dest
, srca
, srcb
, MO_TEQ
, "stnt");
1899 case ST_RRR_0_OPCODE_X1
:
1900 return gen_st_opcode(dc
, dest
, srca
, srcb
, MO_TEQ
, "st");
1902 return gen_rrr_opcode(dc
, OE(opc
, ext
, X1
), dest
, srca
, srcb
);
1904 case SHIFT_OPCODE_X1
:
1905 ext
= get_ShiftOpcodeExtension_X1(bundle
);
1906 imm
= get_ShAmt_X1(bundle
);
1907 return gen_rri_opcode(dc
, OE(opc
, ext
, X1
), dest
, srca
, imm
);
1909 case IMM8_OPCODE_X1
:
1910 ext
= get_Imm8OpcodeExtension_X1(bundle
);
1911 imm
= (int8_t)get_Dest_Imm8_X1(bundle
);
1912 srcb
= get_SrcB_X1(bundle
);
1914 case ST1_ADD_IMM8_OPCODE_X1
:
1915 return gen_st_add_opcode(dc
, srca
, srcb
, imm
, MO_UB
, "st1_add");
1916 case ST2_ADD_IMM8_OPCODE_X1
:
1917 return gen_st_add_opcode(dc
, srca
, srcb
, imm
, MO_TEUW
, "st2_add");
1918 case ST4_ADD_IMM8_OPCODE_X1
:
1919 return gen_st_add_opcode(dc
, srca
, srcb
, imm
, MO_TEUL
, "st4_add");
1920 case STNT1_ADD_IMM8_OPCODE_X1
:
1921 return gen_st_add_opcode(dc
, srca
, srcb
, imm
, MO_UB
, "stnt1_add");
1922 case STNT2_ADD_IMM8_OPCODE_X1
:
1923 return gen_st_add_opcode(dc
, srca
, srcb
, imm
, MO_TEUW
, "stnt2_add");
1924 case STNT4_ADD_IMM8_OPCODE_X1
:
1925 return gen_st_add_opcode(dc
, srca
, srcb
, imm
, MO_TEUL
, "stnt4_add");
1926 case STNT_ADD_IMM8_OPCODE_X1
:
1927 return gen_st_add_opcode(dc
, srca
, srcb
, imm
, MO_TEQ
, "stnt_add");
1928 case ST_ADD_IMM8_OPCODE_X1
:
1929 return gen_st_add_opcode(dc
, srca
, srcb
, imm
, MO_TEQ
, "st_add");
1930 case MFSPR_IMM8_OPCODE_X1
:
1931 return gen_mfspr_x1(dc
, dest
, get_MF_Imm14_X1(bundle
));
1932 case MTSPR_IMM8_OPCODE_X1
:
1933 return gen_mtspr_x1(dc
, get_MT_Imm14_X1(bundle
), srca
);
1935 imm
= (int8_t)get_Imm8_X1(bundle
);
1936 return gen_rri_opcode(dc
, OE(opc
, ext
, X1
), dest
, srca
, imm
);
1938 case BRANCH_OPCODE_X1
:
1939 ext
= get_BrType_X1(bundle
);
1940 imm
= sextract32(get_BrOff_X1(bundle
), 0, 17);
1941 return gen_branch_opcode_x1(dc
, ext
, srca
, imm
);
1943 case JUMP_OPCODE_X1
:
1944 ext
= get_JumpOpcodeExtension_X1(bundle
);
1945 imm
= sextract32(get_JumpOff_X1(bundle
), 0, 27);
1946 return gen_jump_opcode_x1(dc
, ext
, imm
);
1948 case ADDLI_OPCODE_X1
:
1949 case SHL16INSLI_OPCODE_X1
:
1950 case ADDXLI_OPCODE_X1
:
1951 imm
= (int16_t)get_Imm16_X1(bundle
);
1952 return gen_rri_opcode(dc
, OE(opc
, 0, X1
), dest
, srca
, imm
);
1955 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED
;
1959 static void notice_excp(DisasContext
*dc
, uint64_t bundle
,
1960 const char *type
, TileExcp excp
)
1962 if (likely(excp
== TILEGX_EXCP_NONE
)) {
1965 gen_exception(dc
, excp
);
1966 if (excp
== TILEGX_EXCP_OPCODE_UNIMPLEMENTED
) {
1967 qemu_log_mask(LOG_UNIMP
, "UNIMP %s, [" FMT64X
"]\n", type
, bundle
);
1971 static void translate_one_bundle(DisasContext
*dc
, uint64_t bundle
)
1975 for (i
= 0; i
< ARRAY_SIZE(dc
->wb
); i
++) {
1976 DisasContextTemp
*wb
= &dc
->wb
[i
];
1977 wb
->reg
= TILEGX_R_NOREG
;
1978 TCGV_UNUSED_I64(wb
->val
);
1982 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
1983 tcg_gen_debug_insn_start(dc
->pc
);
1986 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " %" PRIx64
": { ", dc
->pc
);
1987 if (get_Mode(bundle
)) {
1988 notice_excp(dc
, bundle
, "y0", decode_y0(dc
, bundle
));
1989 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " ; ");
1990 notice_excp(dc
, bundle
, "y1", decode_y1(dc
, bundle
));
1991 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " ; ");
1992 notice_excp(dc
, bundle
, "y2", decode_y2(dc
, bundle
));
1994 notice_excp(dc
, bundle
, "x0", decode_x0(dc
, bundle
));
1995 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " ; ");
1996 notice_excp(dc
, bundle
, "x1", decode_x1(dc
, bundle
));
1998 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " }\n");
2000 for (i
= dc
->num_wb
- 1; i
>= 0; --i
) {
2001 DisasContextTemp
*wb
= &dc
->wb
[i
];
2002 if (wb
->reg
< TILEGX_R_COUNT
) {
2003 tcg_gen_mov_i64(cpu_regs
[wb
->reg
], wb
->val
);
2005 tcg_temp_free_i64(wb
->val
);
2008 if (dc
->jmp
.cond
!= TCG_COND_NEVER
) {
2009 if (dc
->jmp
.cond
== TCG_COND_ALWAYS
) {
2010 tcg_gen_mov_i64(cpu_pc
, dc
->jmp
.dest
);
2012 TCGv next
= tcg_const_i64(dc
->pc
+ TILEGX_BUNDLE_SIZE_IN_BYTES
);
2013 tcg_gen_movcond_i64(dc
->jmp
.cond
, cpu_pc
,
2014 dc
->jmp
.val1
, load_zero(dc
),
2015 dc
->jmp
.dest
, next
);
2016 tcg_temp_free_i64(dc
->jmp
.val1
);
2017 tcg_temp_free_i64(next
);
2019 tcg_temp_free_i64(dc
->jmp
.dest
);
2022 } else if (dc
->atomic_excp
!= TILEGX_EXCP_NONE
) {
2023 gen_exception(dc
, dc
->atomic_excp
);
2027 static inline void gen_intermediate_code_internal(TileGXCPU
*cpu
,
2028 TranslationBlock
*tb
,
2032 DisasContext
*dc
= &ctx
;
2033 CPUState
*cs
= CPU(cpu
);
2034 CPUTLGState
*env
= &cpu
->env
;
2035 uint64_t pc_start
= tb
->pc
;
2036 uint64_t next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
2039 int max_insns
= tb
->cflags
& CF_COUNT_MASK
;
2043 dc
->exit_tb
= false;
2044 dc
->atomic_excp
= TILEGX_EXCP_NONE
;
2045 dc
->jmp
.cond
= TCG_COND_NEVER
;
2046 TCGV_UNUSED_I64(dc
->jmp
.dest
);
2047 TCGV_UNUSED_I64(dc
->jmp
.val1
);
2048 TCGV_UNUSED_I64(dc
->zero
);
2050 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
2051 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
2054 max_insns
= CF_COUNT_MASK
;
2056 if (cs
->singlestep_enabled
|| singlestep
) {
2063 j
= tcg_op_buf_count();
2067 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
2070 tcg_ctx
.gen_opc_pc
[lj
] = dc
->pc
;
2071 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
2072 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
2074 translate_one_bundle(dc
, cpu_ldq_data(env
, dc
->pc
));
2077 /* PC updated and EXIT_TB/GOTO_TB/exception emitted. */
2080 dc
->pc
+= TILEGX_BUNDLE_SIZE_IN_BYTES
;
2081 if (++num_insns
>= max_insns
2082 || dc
->pc
>= next_page_start
2083 || tcg_op_buf_full()) {
2084 /* Ending the TB due to TB size or page boundary. Set PC. */
2085 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
2091 gen_tb_end(tb
, num_insns
);
2093 j
= tcg_op_buf_count();
2096 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
2099 tb
->size
= dc
->pc
- pc_start
;
2100 tb
->icount
= num_insns
;
2103 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "\n");
2106 void gen_intermediate_code(CPUTLGState
*env
, struct TranslationBlock
*tb
)
2108 gen_intermediate_code_internal(tilegx_env_get_cpu(env
), tb
, false);
2111 void gen_intermediate_code_pc(CPUTLGState
*env
, struct TranslationBlock
*tb
)
2113 gen_intermediate_code_internal(tilegx_env_get_cpu(env
), tb
, true);
2116 void restore_state_to_opc(CPUTLGState
*env
, TranslationBlock
*tb
, int pc_pos
)
2118 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];
2121 void tilegx_tcg_init(void)
2125 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
2126 cpu_pc
= tcg_global_mem_new_i64(TCG_AREG0
, offsetof(CPUTLGState
, pc
), "pc");
2127 for (i
= 0; i
< TILEGX_R_COUNT
; i
++) {
2128 cpu_regs
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
2129 offsetof(CPUTLGState
, regs
[i
]),