/*
 * Copyright (c) 2015 Chen Gang
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/lgpl-2.1.html>
 */
#include "cpu.h"
#include "qemu/log.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "opcode_tilegx.h"
#include "spr_def_64.h"

#define FMT64X "%016" PRIx64
static TCGv_ptr cpu_env;
static TCGv cpu_pc;
static TCGv cpu_regs[TILEGX_R_COUNT];
static const char * const reg_names[64] = {
     "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
     "r8",  "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
    "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
    "r48", "r49", "r50", "r51",  "bp",  "tp",  "sp",  "lr",
    "sn", "idn0", "idn1", "udn0", "udn1", "udn2", "udn3", "zero"
};
/* Modified registers are cached in temporaries until the end of the bundle. */
typedef struct {
    unsigned reg;
    TCGv val;
} DisasContextTemp;

#define MAX_WRITEBACK 4
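
/* For illustration: every pipe in a bundle reads the register state as it
   was before the bundle, so in "{ add r1, r2, r3 ; add r4, r1, r5 }" the
   second pipe must see the old r1.  Buffering up to MAX_WRITEBACK results
   and committing them only after all pipes are translated preserves that.  */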
/* This is the state at translation time.  */
typedef struct {
    uint64_t pc;                /* Current pc */

    TCGv zero;                  /* For zero register */

    DisasContextTemp wb[MAX_WRITEBACK];
    int num_wb;
    int mmuidx;
    bool exit_tb;
    TileExcp atomic_excp;

    struct {
        TCGCond cond;           /* branch condition */
        TCGv dest;              /* branch destination */
        TCGv val1;              /* value to be compared against zero, for cond */
    } jmp;                      /* Jump object, only once in each TB block */
} DisasContext;
#include "exec/gen-icount.h"
/* Differentiate the various pipe encodings.  */
#define TY_X0  0
#define TY_X1  1
#define TY_Y0  2
#define TY_Y1  3

/* Remerge the base opcode and extension fields for switching.
   The X opcode fields are 3 bits; Y0/Y1 opcode fields are 4 bits;
   Y2 opcode field is 2 bits.  */
#define OE(OP, EXT, XY) (TY_##XY + OP * 4 + EXT * 64)
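
/* Worked example: with TY_X0 == 0, base opcode 5 and extension 2 give
   OE(5, 2, X0) == 0 + 5 * 4 + 2 * 64 == 148.  The pipe tag lives in the
   low two bits and the base opcode in the next four, so with opcode
   fields of at most 4 bits the three components can never collide.  */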
/* Similar, but for Y2 only.  */
#define OEY2(OP, MODE) (OP + MODE * 4)

/* Similar, but make sure opcode names match up.  */
#define OE_RR_X0(E)    OE(RRR_0_OPCODE_X0, E##_UNARY_OPCODE_X0, X0)
#define OE_RR_X1(E)    OE(RRR_0_OPCODE_X1, E##_UNARY_OPCODE_X1, X1)
#define OE_RR_Y0(E)    OE(RRR_1_OPCODE_Y0, E##_UNARY_OPCODE_Y0, Y0)
#define OE_RR_Y1(E)    OE(RRR_1_OPCODE_Y1, E##_UNARY_OPCODE_Y1, Y1)
#define OE_RRR(E,N,XY) OE(RRR_##N##_OPCODE_##XY, E##_RRR_##N##_OPCODE_##XY, XY)
#define OE_IM(E,XY)    OE(IMM8_OPCODE_##XY, E##_IMM8_OPCODE_##XY, XY)
#define OE_SH(E,XY)    OE(SHIFT_OPCODE_##XY, E##_SHIFT_OPCODE_##XY, XY)

#define V1_IMM(X)      (((X) & 0xff) * 0x0101010101010101ull)
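
/* e.g. V1_IMM(0x01) == 0x0101010101010101ull: the multiply broadcasts an
   8-bit immediate into each of the eight byte lanes.  */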
static void gen_exception(DisasContext *dc, TileExcp num)
{
    TCGv_i32 tmp;

    tcg_gen_movi_tl(cpu_pc, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
    tmp = tcg_const_i32(num);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->exit_tb = true;
}
static bool check_gr(DisasContext *dc, uint8_t reg)
{
    if (likely(reg < TILEGX_R_COUNT)) {
        return true;
    }

    switch (reg) {
    case TILEGX_R_SN:
    case TILEGX_R_ZERO:
        break;
    case TILEGX_R_IDN0:
    case TILEGX_R_IDN1:
        gen_exception(dc, TILEGX_EXCP_REG_IDN_ACCESS);
        break;
    case TILEGX_R_UDN0:
    case TILEGX_R_UDN1:
    case TILEGX_R_UDN2:
    case TILEGX_R_UDN3:
        gen_exception(dc, TILEGX_EXCP_REG_UDN_ACCESS);
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}
static TCGv load_zero(DisasContext *dc)
{
    if (TCGV_IS_UNUSED_I64(dc->zero)) {
        dc->zero = tcg_const_i64(0);
    }
    return dc->zero;
}
static TCGv load_gr(DisasContext *dc, unsigned reg)
{
    if (check_gr(dc, reg)) {
        return cpu_regs[reg];
    }
    return load_zero(dc);
}
static TCGv dest_gr(DisasContext *dc, unsigned reg)
{
    int n;

    /* Skip the result, mark the exception if necessary, and continue */
    check_gr(dc, reg);
    n = dc->num_wb++;
    dc->wb[n].reg = reg;
    return dc->wb[n].val = tcg_temp_new_i64();
}
static void gen_saturate_op(TCGv tdest, TCGv tsrca, TCGv tsrcb,
                            void (*operate)(TCGv, TCGv, TCGv))
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32s_tl(tdest, tsrca);
    tcg_gen_ext32s_tl(t0, tsrcb);
    operate(tdest, tdest, t0);

    tcg_gen_movi_tl(t0, 0x7fffffff);
    tcg_gen_movcond_tl(TCG_COND_GT, tdest, tdest, t0, t0, tdest);
    tcg_gen_movi_tl(t0, -0x80000000LL);
    tcg_gen_movcond_tl(TCG_COND_LT, tdest, tdest, t0, t0, tdest);

    tcg_temp_free(t0);
}
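
/* Reference model of the sequence above, for illustration only (not used
   by the translator): do the operation on the sign-extended low halves,
   then clamp the exact result to the signed 32-bit range.  */
static inline int64_t saturate32_add_ref(int64_t a, int64_t b)
{
    int64_t r = (int64_t)(int32_t)a + (int64_t)(int32_t)b;
    if (r > 0x7fffffff) {
        r = 0x7fffffff;
    } else if (r < -0x80000000LL) {
        r = -0x80000000LL;
    }
    return r;
}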
static void gen_atomic_excp(DisasContext *dc, unsigned dest, TCGv tdest,
                            TCGv tsrca, TCGv tsrcb, TileExcp excp)
{
#ifdef CONFIG_USER_ONLY
    TCGv_i32 t;

    tcg_gen_st_tl(tsrca, cpu_env, offsetof(CPUTLGState, atomic_srca));
    tcg_gen_st_tl(tsrcb, cpu_env, offsetof(CPUTLGState, atomic_srcb));
    t = tcg_const_i32(dest);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUTLGState, atomic_dstr));
    tcg_temp_free_i32(t);

    /* We're going to write the real result in the exception.  But in
       the meantime we've already created a writeback register, and
       we don't want that to remain uninitialized.  */
    tcg_gen_movi_tl(tdest, 0);

    /* Note that we need to delay issuing the exception that implements
       the atomic operation until after writing back the results of the
       instruction occupying the X0 pipe.  */
    dc->atomic_excp = excp;
#else
    gen_exception(dc, TILEGX_EXCP_OPCODE_UNIMPLEMENTED);
#endif
}
/* Shift the 128-bit value TSRCA:TSRCD right by the number of bytes
   specified by the bottom 3 bits of TSRCB, and set TDEST to the
   low 64 bits of the resulting value.  */
static void gen_dblalign(TCGv tdest, TCGv tsrcd, TCGv tsrca, TCGv tsrcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_andi_tl(t0, tsrcb, 7);
    tcg_gen_shli_tl(t0, t0, 3);
    tcg_gen_shr_tl(tdest, tsrcd, t0);

    /* We want to do "t0 = tsrca << (64 - t0)".  Two's complement
       arithmetic on a 6-bit field tells us that 64 - t0 is equal
       to (t0 ^ 63) + 1.  So we can do the shift in two parts,
       neither of which will be an invalid shift by 64.  */
    tcg_gen_xori_tl(t0, t0, 63);
    tcg_gen_shl_tl(t0, tsrca, t0);
    tcg_gen_shli_tl(t0, t0, 1);
    tcg_gen_or_tl(tdest, tdest, t0);

    tcg_temp_free(t0);
}
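
/* Reference model, illustration only: since the bit shift sh is at most
   56, (sh ^ 63) + 1 == 64 - sh, so splitting the left shift into
   "<< (sh ^ 63)" then "<< 1" never shifts a 64-bit value by 64.  */
static inline uint64_t dblalign_ref(uint64_t hi, uint64_t lo, uint64_t srcb)
{
    unsigned sh = (srcb & 7) * 8;
    return (lo >> sh) | ((hi << (sh ^ 63)) << 1);
}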
/* Similarly, except that the 128-bit value is TSRCA:TSRCB, and the
   right shift is an immediate.  */
static void gen_dblaligni(TCGv tdest, TCGv tsrca, TCGv tsrcb, int shr)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_shri_tl(t0, tsrcb, shr);
    tcg_gen_shli_tl(tdest, tsrca, 64 - shr);
    tcg_gen_or_tl(tdest, tdest, t0);

    tcg_temp_free(t0);
}
typedef enum {
    LU, LS, HU, HS
} MulHalf;

static void gen_ext_half(TCGv d, TCGv s, MulHalf h)
{
    switch (h) {
    case LU:
        tcg_gen_ext32u_tl(d, s);
        break;
    case LS:
        tcg_gen_ext32s_tl(d, s);
        break;
    case HU:
        tcg_gen_shri_tl(d, s, 32);
        break;
    case HS:
        tcg_gen_sari_tl(d, s, 32);
        break;
    }
}
static void gen_mul_half(TCGv tdest, TCGv tsrca, TCGv tsrcb,
                         MulHalf ha, MulHalf hb)
{
    TCGv t = tcg_temp_new();
    gen_ext_half(t, tsrca, ha);
    gen_ext_half(tdest, tsrcb, hb);
    tcg_gen_mul_tl(tdest, tdest, t);
    tcg_temp_free(t);
}
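
/* Reference model, illustration only: the HS/LU combination above is a
   signed-high by unsigned-low 32x32->64 multiply.  */
static inline int64_t mul_hs_lu_ref(uint64_t a, uint64_t b)
{
    return (int64_t)(int32_t)(a >> 32) * (uint32_t)b;
}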
/* Equality comparison with zero can be done quickly and efficiently.  */
static void gen_v1cmpeq0(TCGv v)
{
    TCGv m = tcg_const_tl(V1_IMM(0x7f));
    TCGv c = tcg_temp_new();

    /* ~(((v & m) + m) | m | v).  Sets the msb for each byte == 0.  */
    tcg_gen_and_tl(c, v, m);
    tcg_gen_add_tl(c, c, m);
    tcg_gen_or_tl(c, c, m);
    tcg_gen_nor_tl(c, c, v);
    tcg_temp_free(m);

    /* Shift the msb down to form the lsb boolean result.  */
    tcg_gen_shri_tl(v, c, 7);
    tcg_temp_free(c);
}
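
/* Reference model of the byte trick, illustration only: with m holding
   0x7f in every lane, (v & m) + m carries into bit 7 of each lane whose
   low seven bits are nonzero; OR-ing in m and v and negating leaves bit 7
   set exactly for the all-zero bytes.  */
static inline uint64_t v1cmpeq0_ref(uint64_t v)
{
    uint64_t m = 0x7f7f7f7f7f7f7f7full;
    return (~(((v & m) + m) | m | v)) >> 7;
}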
static void gen_v1cmpne0(TCGv v)
{
    TCGv m = tcg_const_tl(V1_IMM(0x7f));
    TCGv c = tcg_temp_new();

    /* (((v & m) + m) | v) & ~m.  Sets the msb for each byte != 0.  */
    tcg_gen_and_tl(c, v, m);
    tcg_gen_add_tl(c, c, m);
    tcg_gen_or_tl(c, c, v);
    tcg_gen_andc_tl(c, c, m);
    tcg_temp_free(m);

    /* Shift the msb down to form the lsb boolean result.  */
    tcg_gen_shri_tl(v, c, 7);
    tcg_temp_free(c);
}
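
/* Reference model, illustration only: the complementary form keeps bit 7
   set for each nonzero byte instead.  */
static inline uint64_t v1cmpne0_ref(uint64_t v)
{
    uint64_t m = 0x7f7f7f7f7f7f7f7full;
    return ((((v & m) + m) | v) & ~m) >> 7;
}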
static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca,
                              unsigned srcb, TCGMemOp memop, const char *name)
{
    if (dest) {
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    tcg_gen_qemu_st_tl(load_gr(dc, srcb), load_gr(dc, srca),
                       dc->mmuidx, memop);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", name,
                  reg_names[srca], reg_names[srcb]);
    return TILEGX_EXCP_NONE;
}
static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb,
                                  int imm, TCGMemOp memop, const char *name)
{
    TCGv tsrca = load_gr(dc, srca);
    TCGv tsrcb = load_gr(dc, srcb);

    tcg_gen_qemu_st_tl(tsrcb, tsrca, dc->mmuidx, memop);
    tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", name,
                  reg_names[srca], reg_names[srcb], imm);
    return TILEGX_EXCP_NONE;
}
static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext,
                              unsigned dest, unsigned srca)
{
    TCGv tdest, tsrca;
    const char *mnemonic;
    TCGMemOp memop;
    TileExcp ret = TILEGX_EXCP_NONE;

    /* Eliminate instructions with no output before doing anything else.  */
    switch (opext) {
    case OE_RR_X0(NOP):
    case OE_RR_X1(NOP):
    case OE_RR_Y0(NOP):
    case OE_RR_Y1(NOP):
        mnemonic = "nop";
        goto done0;
    case OE_RR_X0(FNOP):
    case OE_RR_X1(FNOP):
    case OE_RR_Y0(FNOP):
    case OE_RR_Y1(FNOP):
        mnemonic = "fnop";
        goto done0;
    case OE_RR_X1(DRAIN):
        mnemonic = "drain";
        goto done0;
    case OE_RR_X1(FLUSHWB):
        mnemonic = "flushwb";
        goto done0;
    case OE_RR_X1(ILL):
    case OE_RR_Y1(ILL):
        mnemonic = (dest == 0x1c && srca == 0x25 ? "bpt" : "ill");
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    case OE_RR_X1(MF):
        mnemonic = "mf";
        goto done0;
    case OE_RR_X1(NAP):
        /* ??? This should yield, especially in system mode.  */
        mnemonic = "nap";
        goto done0;
    case OE_RR_X1(SWINT0):
    case OE_RR_X1(SWINT2):
    case OE_RR_X1(SWINT3):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RR_X1(SWINT1):
        ret = TILEGX_EXCP_SYSCALL;
        mnemonic = "swint1";
    done0:
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
        return ret;

    case OE_RR_X1(DTLBPR):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RR_X1(FINV):
        mnemonic = "finv";
        goto done1;
    case OE_RR_X1(FLUSH):
        mnemonic = "flush";
        goto done1;
    case OE_RR_X1(ICOH):
        mnemonic = "icoh";
        goto done1;
    case OE_RR_X1(INV):
        mnemonic = "inv";
        goto done1;
    case OE_RR_X1(WH64):
        mnemonic = "wh64";
        goto done1;
    case OE_RR_X1(JRP):
    case OE_RR_Y1(JRP):
        mnemonic = "jrp";
        goto do_jr;
    case OE_RR_X1(JR):
    case OE_RR_Y1(JR):
        mnemonic = "jr";
        goto do_jr;
    case OE_RR_X1(JALRP):
    case OE_RR_Y1(JALRP):
        mnemonic = "jalrp";
        goto do_jalr;
    case OE_RR_X1(JALR):
    case OE_RR_Y1(JALR):
        mnemonic = "jalr";
    do_jalr:
        tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
                        dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
    do_jr:
        dc->jmp.cond = TCG_COND_ALWAYS;
        dc->jmp.dest = tcg_temp_new();
        tcg_gen_andi_tl(dc->jmp.dest, load_gr(dc, srca), ~7);
    done1:
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s", mnemonic, reg_names[srca]);
        return ret;
    }

    tdest = dest_gr(dc, dest);
    tsrca = load_gr(dc, srca);

    switch (opext) {
    case OE_RR_X0(CNTLZ):
    case OE_RR_Y0(CNTLZ):
        gen_helper_cntlz(tdest, tsrca);
        mnemonic = "cntlz";
        break;
    case OE_RR_X0(CNTTZ):
    case OE_RR_Y0(CNTTZ):
        gen_helper_cnttz(tdest, tsrca);
        mnemonic = "cnttz";
        break;
    case OE_RR_X0(FSINGLE_PACK1):
    case OE_RR_Y0(FSINGLE_PACK1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RR_X1(LD1S):
        memop = MO_SB;
        mnemonic = "ld1s";
        goto do_load;
    case OE_RR_X1(LD1U):
        memop = MO_UB;
        mnemonic = "ld1u";
        goto do_load;
    case OE_RR_X1(LD2S):
        memop = MO_TESW;
        mnemonic = "ld2s";
        goto do_load;
    case OE_RR_X1(LD2U):
        memop = MO_TEUW;
        mnemonic = "ld2u";
        goto do_load;
    case OE_RR_X1(LD4S):
        memop = MO_TESL;
        mnemonic = "ld4s";
        goto do_load;
    case OE_RR_X1(LD4U):
        memop = MO_TEUL;
        mnemonic = "ld4u";
        goto do_load;
    case OE_RR_X1(LDNT1S):
        memop = MO_SB;
        mnemonic = "ldnt1s";
        goto do_load;
    case OE_RR_X1(LDNT1U):
        memop = MO_UB;
        mnemonic = "ldnt1u";
        goto do_load;
    case OE_RR_X1(LDNT2S):
        memop = MO_TESW;
        mnemonic = "ldnt2s";
        goto do_load;
    case OE_RR_X1(LDNT2U):
        memop = MO_TEUW;
        mnemonic = "ldnt2u";
        goto do_load;
    case OE_RR_X1(LDNT4S):
        memop = MO_TESL;
        mnemonic = "ldnt4s";
        goto do_load;
    case OE_RR_X1(LDNT4U):
        memop = MO_TEUL;
        mnemonic = "ldnt4u";
        goto do_load;
    case OE_RR_X1(LDNT):
        memop = MO_TEQ;
        mnemonic = "ldnt";
        goto do_load;
    case OE_RR_X1(LD):
        memop = MO_TEQ;
        mnemonic = "ld";
    do_load:
        tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
        break;
    case OE_RR_X1(LDNA):
        tcg_gen_andi_tl(tdest, tsrca, ~7);
        tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
        mnemonic = "ldna";
        break;
    case OE_RR_X1(LNK):
    case OE_RR_Y1(LNK):
        if (srca) {
            return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
        }
        tcg_gen_movi_tl(tdest, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
        mnemonic = "lnk";
        break;
    case OE_RR_X0(PCNT):
    case OE_RR_Y0(PCNT):
        gen_helper_pcnt(tdest, tsrca);
        mnemonic = "pcnt";
        break;
    case OE_RR_X0(REVBITS):
    case OE_RR_Y0(REVBITS):
        gen_helper_revbits(tdest, tsrca);
        mnemonic = "revbits";
        break;
    case OE_RR_X0(REVBYTES):
    case OE_RR_Y0(REVBYTES):
        tcg_gen_bswap64_tl(tdest, tsrca);
        mnemonic = "revbytes";
        break;
    case OE_RR_X0(TBLIDXB0):
    case OE_RR_Y0(TBLIDXB0):
    case OE_RR_X0(TBLIDXB1):
    case OE_RR_Y0(TBLIDXB1):
    case OE_RR_X0(TBLIDXB2):
    case OE_RR_Y0(TBLIDXB2):
    case OE_RR_X0(TBLIDXB3):
    case OE_RR_Y0(TBLIDXB3):
    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
                  reg_names[dest], reg_names[srca]);
    return ret;
}
static TileExcp gen_rrr_opcode(DisasContext *dc, unsigned opext,
                               unsigned dest, unsigned srca, unsigned srcb)
{
    TCGv tdest = dest_gr(dc, dest);
    TCGv tsrca = load_gr(dc, srca);
    TCGv tsrcb = load_gr(dc, srcb);
    TCGv t0;
    const char *mnemonic;

    switch (opext) {
    case OE_RRR(ADDXSC, 0, X0):
    case OE_RRR(ADDXSC, 0, X1):
        gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_add_tl);
        mnemonic = "addxsc";
        break;
    case OE_RRR(ADDX, 0, X0):
    case OE_RRR(ADDX, 0, X1):
    case OE_RRR(ADDX, 0, Y0):
    case OE_RRR(ADDX, 0, Y1):
        tcg_gen_add_tl(tdest, tsrca, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "addx";
        break;
    case OE_RRR(ADD, 0, X0):
    case OE_RRR(ADD, 0, X1):
    case OE_RRR(ADD, 0, Y0):
    case OE_RRR(ADD, 0, Y1):
        tcg_gen_add_tl(tdest, tsrca, tsrcb);
        mnemonic = "add";
        break;
    case OE_RRR(AND, 0, X0):
    case OE_RRR(AND, 0, X1):
    case OE_RRR(AND, 5, Y0):
    case OE_RRR(AND, 5, Y1):
        tcg_gen_and_tl(tdest, tsrca, tsrcb);
        mnemonic = "and";
        break;
    case OE_RRR(CMOVEQZ, 0, X0):
    case OE_RRR(CMOVEQZ, 4, Y0):
        tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, load_zero(dc),
                           tsrcb, load_gr(dc, dest));
        mnemonic = "cmoveqz";
        break;
    case OE_RRR(CMOVNEZ, 0, X0):
    case OE_RRR(CMOVNEZ, 4, Y0):
        tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, load_zero(dc),
                           tsrcb, load_gr(dc, dest));
        mnemonic = "cmovnez";
        break;
    case OE_RRR(CMPEQ, 0, X0):
    case OE_RRR(CMPEQ, 0, X1):
    case OE_RRR(CMPEQ, 3, Y0):
    case OE_RRR(CMPEQ, 3, Y1):
        tcg_gen_setcond_tl(TCG_COND_EQ, tdest, tsrca, tsrcb);
        mnemonic = "cmpeq";
        break;
    case OE_RRR(CMPEXCH4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_CMPEXCH4);
        mnemonic = "cmpexch4";
        break;
    case OE_RRR(CMPEXCH, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_CMPEXCH);
        mnemonic = "cmpexch";
        break;
    case OE_RRR(CMPLES, 0, X0):
    case OE_RRR(CMPLES, 0, X1):
    case OE_RRR(CMPLES, 2, Y0):
    case OE_RRR(CMPLES, 2, Y1):
        tcg_gen_setcond_tl(TCG_COND_LE, tdest, tsrca, tsrcb);
        mnemonic = "cmples";
        break;
    case OE_RRR(CMPLEU, 0, X0):
    case OE_RRR(CMPLEU, 0, X1):
    case OE_RRR(CMPLEU, 2, Y0):
    case OE_RRR(CMPLEU, 2, Y1):
        tcg_gen_setcond_tl(TCG_COND_LEU, tdest, tsrca, tsrcb);
        mnemonic = "cmpleu";
        break;
    case OE_RRR(CMPLTS, 0, X0):
    case OE_RRR(CMPLTS, 0, X1):
    case OE_RRR(CMPLTS, 2, Y0):
    case OE_RRR(CMPLTS, 2, Y1):
        tcg_gen_setcond_tl(TCG_COND_LT, tdest, tsrca, tsrcb);
        mnemonic = "cmplts";
        break;
    case OE_RRR(CMPLTU, 0, X0):
    case OE_RRR(CMPLTU, 0, X1):
    case OE_RRR(CMPLTU, 2, Y0):
    case OE_RRR(CMPLTU, 2, Y1):
        tcg_gen_setcond_tl(TCG_COND_LTU, tdest, tsrca, tsrcb);
        mnemonic = "cmpltu";
        break;
    case OE_RRR(CMPNE, 0, X0):
    case OE_RRR(CMPNE, 0, X1):
    case OE_RRR(CMPNE, 3, Y0):
    case OE_RRR(CMPNE, 3, Y1):
        tcg_gen_setcond_tl(TCG_COND_NE, tdest, tsrca, tsrcb);
        mnemonic = "cmpne";
        break;
    case OE_RRR(CMULAF, 0, X0):
    case OE_RRR(CMULA, 0, X0):
    case OE_RRR(CMULFR, 0, X0):
    case OE_RRR(CMULF, 0, X0):
    case OE_RRR(CMULHR, 0, X0):
    case OE_RRR(CMULH, 0, X0):
    case OE_RRR(CMUL, 0, X0):
    case OE_RRR(CRC32_32, 0, X0):
    case OE_RRR(CRC32_8, 0, X0):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(DBLALIGN2, 0, X0):
    case OE_RRR(DBLALIGN2, 0, X1):
        gen_dblaligni(tdest, tsrca, tsrcb, 16);
        mnemonic = "dblalign2";
        break;
    case OE_RRR(DBLALIGN4, 0, X0):
    case OE_RRR(DBLALIGN4, 0, X1):
        gen_dblaligni(tdest, tsrca, tsrcb, 32);
        mnemonic = "dblalign4";
        break;
    case OE_RRR(DBLALIGN6, 0, X0):
    case OE_RRR(DBLALIGN6, 0, X1):
        gen_dblaligni(tdest, tsrca, tsrcb, 48);
        mnemonic = "dblalign6";
        break;
    case OE_RRR(DBLALIGN, 0, X0):
        gen_dblalign(tdest, load_gr(dc, dest), tsrca, tsrcb);
        mnemonic = "dblalign";
        break;
    case OE_RRR(EXCH4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_EXCH4);
        mnemonic = "exch4";
        break;
    case OE_RRR(EXCH, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_EXCH);
        mnemonic = "exch";
        break;
    case OE_RRR(FDOUBLE_ADDSUB, 0, X0):
    case OE_RRR(FDOUBLE_ADD_FLAGS, 0, X0):
    case OE_RRR(FDOUBLE_MUL_FLAGS, 0, X0):
    case OE_RRR(FDOUBLE_PACK1, 0, X0):
    case OE_RRR(FDOUBLE_PACK2, 0, X0):
    case OE_RRR(FDOUBLE_SUB_FLAGS, 0, X0):
    case OE_RRR(FDOUBLE_UNPACK_MAX, 0, X0):
    case OE_RRR(FDOUBLE_UNPACK_MIN, 0, X0):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(FETCHADD4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHADD4);
        mnemonic = "fetchadd4";
        break;
    case OE_RRR(FETCHADDGEZ4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHADDGEZ4);
        mnemonic = "fetchaddgez4";
        break;
    case OE_RRR(FETCHADDGEZ, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHADDGEZ);
        mnemonic = "fetchaddgez";
        break;
    case OE_RRR(FETCHADD, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHADD);
        mnemonic = "fetchadd";
        break;
    case OE_RRR(FETCHAND4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHAND4);
        mnemonic = "fetchand4";
        break;
    case OE_RRR(FETCHAND, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHAND);
        mnemonic = "fetchand";
        break;
    case OE_RRR(FETCHOR4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHOR4);
        mnemonic = "fetchor4";
        break;
    case OE_RRR(FETCHOR, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHOR);
        mnemonic = "fetchor";
        break;
    case OE_RRR(FSINGLE_ADD1, 0, X0):
    case OE_RRR(FSINGLE_ADDSUB2, 0, X0):
    case OE_RRR(FSINGLE_MUL1, 0, X0):
    case OE_RRR(FSINGLE_MUL2, 0, X0):
    case OE_RRR(FSINGLE_PACK2, 0, X0):
    case OE_RRR(FSINGLE_SUB1, 0, X0):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(MNZ, 0, X0):
    case OE_RRR(MNZ, 0, X1):
    case OE_RRR(MNZ, 4, Y0):
    case OE_RRR(MNZ, 4, Y1):
        t0 = load_zero(dc);
        tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, t0, tsrcb, t0);
        mnemonic = "mnz";
        break;
    case OE_RRR(MULAX, 0, X0):
    case OE_RRR(MULAX, 3, Y0):
        tcg_gen_mul_tl(tdest, tsrca, tsrcb);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "mulax";
        break;
    case OE_RRR(MULA_HS_HS, 0, X0):
    case OE_RRR(MULA_HS_HS, 9, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, HS);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hs_hs";
        break;
    case OE_RRR(MULA_HS_HU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, HU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hs_hu";
        break;
    case OE_RRR(MULA_HS_LS, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, LS);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hs_ls";
        break;
    case OE_RRR(MULA_HS_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, LU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hs_lu";
        break;
    case OE_RRR(MULA_HU_HU, 0, X0):
    case OE_RRR(MULA_HU_HU, 9, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, HU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hu_hu";
        break;
    case OE_RRR(MULA_HU_LS, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, LS);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hu_ls";
        break;
    case OE_RRR(MULA_HU_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, LU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hu_lu";
        break;
    case OE_RRR(MULA_LS_LS, 0, X0):
    case OE_RRR(MULA_LS_LS, 9, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, LS, LS);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_ls_ls";
        break;
    case OE_RRR(MULA_LS_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, LS, LU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_ls_lu";
        break;
    case OE_RRR(MULA_LU_LU, 0, X0):
    case OE_RRR(MULA_LU_LU, 9, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, LU, LU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_lu_lu";
        break;
    case OE_RRR(MULX, 0, X0):
    case OE_RRR(MULX, 3, Y0):
        tcg_gen_mul_tl(tdest, tsrca, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "mulx";
        break;
    case OE_RRR(MUL_HS_HS, 0, X0):
    case OE_RRR(MUL_HS_HS, 8, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, HS);
        mnemonic = "mul_hs_hs";
        break;
    case OE_RRR(MUL_HS_HU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, HU);
        mnemonic = "mul_hs_hu";
        break;
    case OE_RRR(MUL_HS_LS, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, LS);
        mnemonic = "mul_hs_ls";
        break;
    case OE_RRR(MUL_HS_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, LU);
        mnemonic = "mul_hs_lu";
        break;
    case OE_RRR(MUL_HU_HU, 0, X0):
    case OE_RRR(MUL_HU_HU, 8, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, HU);
        mnemonic = "mul_hu_hu";
        break;
    case OE_RRR(MUL_HU_LS, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, LS);
        mnemonic = "mul_hu_ls";
        break;
    case OE_RRR(MUL_HU_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, LU);
        mnemonic = "mul_hu_lu";
        break;
    case OE_RRR(MUL_LS_LS, 0, X0):
    case OE_RRR(MUL_LS_LS, 8, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, LS, LS);
        mnemonic = "mul_ls_ls";
        break;
    case OE_RRR(MUL_LS_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, LS, LU);
        mnemonic = "mul_ls_lu";
        break;
    case OE_RRR(MUL_LU_LU, 0, X0):
    case OE_RRR(MUL_LU_LU, 8, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, LU, LU);
        mnemonic = "mul_lu_lu";
        break;
    case OE_RRR(MZ, 0, X0):
    case OE_RRR(MZ, 0, X1):
    case OE_RRR(MZ, 4, Y0):
    case OE_RRR(MZ, 4, Y1):
        t0 = load_zero(dc);
        tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, t0, tsrcb, t0);
        mnemonic = "mz";
        break;
    case OE_RRR(NOR, 0, X0):
    case OE_RRR(NOR, 0, X1):
    case OE_RRR(NOR, 5, Y0):
    case OE_RRR(NOR, 5, Y1):
        tcg_gen_nor_tl(tdest, tsrca, tsrcb);
        mnemonic = "nor";
        break;
    case OE_RRR(OR, 0, X0):
    case OE_RRR(OR, 0, X1):
    case OE_RRR(OR, 5, Y0):
    case OE_RRR(OR, 5, Y1):
        tcg_gen_or_tl(tdest, tsrca, tsrcb);
        mnemonic = "or";
        break;
    case OE_RRR(ROTL, 0, X0):
    case OE_RRR(ROTL, 0, X1):
    case OE_RRR(ROTL, 6, Y0):
    case OE_RRR(ROTL, 6, Y1):
        tcg_gen_andi_tl(tdest, tsrcb, 63);
        tcg_gen_rotl_tl(tdest, tsrca, tdest);
        mnemonic = "rotl";
        break;
    case OE_RRR(SHL1ADDX, 0, X0):
    case OE_RRR(SHL1ADDX, 0, X1):
    case OE_RRR(SHL1ADDX, 7, Y0):
    case OE_RRR(SHL1ADDX, 7, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 1);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "shl1addx";
        break;
    case OE_RRR(SHL1ADD, 0, X0):
    case OE_RRR(SHL1ADD, 0, X1):
    case OE_RRR(SHL1ADD, 1, Y0):
    case OE_RRR(SHL1ADD, 1, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 1);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        mnemonic = "shl1add";
        break;
    case OE_RRR(SHL2ADDX, 0, X0):
    case OE_RRR(SHL2ADDX, 0, X1):
    case OE_RRR(SHL2ADDX, 7, Y0):
    case OE_RRR(SHL2ADDX, 7, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 2);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "shl2addx";
        break;
    case OE_RRR(SHL2ADD, 0, X0):
    case OE_RRR(SHL2ADD, 0, X1):
    case OE_RRR(SHL2ADD, 1, Y0):
    case OE_RRR(SHL2ADD, 1, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 2);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        mnemonic = "shl2add";
        break;
    case OE_RRR(SHL3ADDX, 0, X0):
    case OE_RRR(SHL3ADDX, 0, X1):
    case OE_RRR(SHL3ADDX, 7, Y0):
    case OE_RRR(SHL3ADDX, 7, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 3);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "shl3addx";
        break;
    case OE_RRR(SHL3ADD, 0, X0):
    case OE_RRR(SHL3ADD, 0, X1):
    case OE_RRR(SHL3ADD, 1, Y0):
    case OE_RRR(SHL3ADD, 1, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 3);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        mnemonic = "shl3add";
        break;
    case OE_RRR(SHLX, 0, X0):
    case OE_RRR(SHLX, 0, X1):
        tcg_gen_andi_tl(tdest, tsrcb, 31);
        tcg_gen_shl_tl(tdest, tsrca, tdest);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "shlx";
        break;
    case OE_RRR(SHL, 0, X0):
    case OE_RRR(SHL, 0, X1):
    case OE_RRR(SHL, 6, Y0):
    case OE_RRR(SHL, 6, Y1):
        tcg_gen_andi_tl(tdest, tsrcb, 63);
        tcg_gen_shl_tl(tdest, tsrca, tdest);
        mnemonic = "shl";
        break;
    case OE_RRR(SHRS, 0, X0):
    case OE_RRR(SHRS, 0, X1):
    case OE_RRR(SHRS, 6, Y0):
    case OE_RRR(SHRS, 6, Y1):
        tcg_gen_andi_tl(tdest, tsrcb, 63);
        tcg_gen_sar_tl(tdest, tsrca, tdest);
        mnemonic = "shrs";
        break;
    case OE_RRR(SHRUX, 0, X0):
    case OE_RRR(SHRUX, 0, X1):
        t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, tsrcb, 31);
        tcg_gen_ext32u_tl(tdest, tsrca);
        tcg_gen_shr_tl(tdest, tdest, t0);
        tcg_gen_ext32s_tl(tdest, tdest);
        tcg_temp_free(t0);
        mnemonic = "shrux";
        break;
    case OE_RRR(SHRU, 0, X0):
    case OE_RRR(SHRU, 0, X1):
    case OE_RRR(SHRU, 6, Y0):
    case OE_RRR(SHRU, 6, Y1):
        tcg_gen_andi_tl(tdest, tsrcb, 63);
        tcg_gen_shr_tl(tdest, tsrca, tdest);
        mnemonic = "shru";
        break;
    case OE_RRR(SHUFFLEBYTES, 0, X0):
        gen_helper_shufflebytes(tdest, load_gr(dc, dest), tsrca, tsrcb);
        mnemonic = "shufflebytes";
        break;
    case OE_RRR(SUBXSC, 0, X0):
    case OE_RRR(SUBXSC, 0, X1):
        gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_sub_tl);
        mnemonic = "subxsc";
        break;
    case OE_RRR(SUBX, 0, X0):
    case OE_RRR(SUBX, 0, X1):
    case OE_RRR(SUBX, 0, Y0):
    case OE_RRR(SUBX, 0, Y1):
        tcg_gen_sub_tl(tdest, tsrca, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "subx";
        break;
    case OE_RRR(SUB, 0, X0):
    case OE_RRR(SUB, 0, X1):
    case OE_RRR(SUB, 0, Y0):
    case OE_RRR(SUB, 0, Y1):
        tcg_gen_sub_tl(tdest, tsrca, tsrcb);
        mnemonic = "sub";
        break;
    case OE_RRR(V1ADDUC, 0, X0):
    case OE_RRR(V1ADDUC, 0, X1):
    case OE_RRR(V1ADD, 0, X0):
    case OE_RRR(V1ADD, 0, X1):
    case OE_RRR(V1ADIFFU, 0, X0):
    case OE_RRR(V1AVGU, 0, X0):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V1CMPEQ, 0, X0):
    case OE_RRR(V1CMPEQ, 0, X1):
        tcg_gen_xor_tl(tdest, tsrca, tsrcb);
        gen_v1cmpeq0(tdest);
        mnemonic = "v1cmpeq";
        break;
    case OE_RRR(V1CMPLES, 0, X0):
    case OE_RRR(V1CMPLES, 0, X1):
    case OE_RRR(V1CMPLEU, 0, X0):
    case OE_RRR(V1CMPLEU, 0, X1):
    case OE_RRR(V1CMPLTS, 0, X0):
    case OE_RRR(V1CMPLTS, 0, X1):
    case OE_RRR(V1CMPLTU, 0, X0):
    case OE_RRR(V1CMPLTU, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V1CMPNE, 0, X0):
    case OE_RRR(V1CMPNE, 0, X1):
        tcg_gen_xor_tl(tdest, tsrca, tsrcb);
        gen_v1cmpne0(tdest);
        mnemonic = "v1cmpne";
        break;
    case OE_RRR(V1DDOTPUA, 0, X0):
    case OE_RRR(V1DDOTPUSA, 0, X0):
    case OE_RRR(V1DDOTPUS, 0, X0):
    case OE_RRR(V1DDOTPU, 0, X0):
    case OE_RRR(V1DOTPA, 0, X0):
    case OE_RRR(V1DOTPUA, 0, X0):
    case OE_RRR(V1DOTPUSA, 0, X0):
    case OE_RRR(V1DOTPUS, 0, X0):
    case OE_RRR(V1DOTPU, 0, X0):
    case OE_RRR(V1DOTP, 0, X0):
    case OE_RRR(V1INT_H, 0, X0):
    case OE_RRR(V1INT_H, 0, X1):
    case OE_RRR(V1INT_L, 0, X0):
    case OE_RRR(V1INT_L, 0, X1):
    case OE_RRR(V1MAXU, 0, X0):
    case OE_RRR(V1MAXU, 0, X1):
    case OE_RRR(V1MINU, 0, X0):
    case OE_RRR(V1MINU, 0, X1):
    case OE_RRR(V1MNZ, 0, X0):
    case OE_RRR(V1MNZ, 0, X1):
    case OE_RRR(V1MULTU, 0, X0):
    case OE_RRR(V1MULUS, 0, X0):
    case OE_RRR(V1MULU, 0, X0):
    case OE_RRR(V1MZ, 0, X0):
    case OE_RRR(V1MZ, 0, X1):
    case OE_RRR(V1SADAU, 0, X0):
    case OE_RRR(V1SADU, 0, X0):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V1SHL, 0, X0):
    case OE_RRR(V1SHL, 0, X1):
        gen_helper_v1shl(tdest, tsrca, tsrcb);
        mnemonic = "v1shl";
        break;
    case OE_RRR(V1SHRS, 0, X0):
    case OE_RRR(V1SHRS, 0, X1):
        gen_helper_v1shrs(tdest, tsrca, tsrcb);
        mnemonic = "v1shrs";
        break;
    case OE_RRR(V1SHRU, 0, X0):
    case OE_RRR(V1SHRU, 0, X1):
        gen_helper_v1shru(tdest, tsrca, tsrcb);
        mnemonic = "v1shru";
        break;
    case OE_RRR(V1SUBUC, 0, X0):
    case OE_RRR(V1SUBUC, 0, X1):
    case OE_RRR(V1SUB, 0, X0):
    case OE_RRR(V1SUB, 0, X1):
    case OE_RRR(V2ADDSC, 0, X0):
    case OE_RRR(V2ADDSC, 0, X1):
    case OE_RRR(V2ADD, 0, X0):
    case OE_RRR(V2ADD, 0, X1):
    case OE_RRR(V2ADIFFS, 0, X0):
    case OE_RRR(V2AVGS, 0, X0):
    case OE_RRR(V2CMPEQ, 0, X0):
    case OE_RRR(V2CMPEQ, 0, X1):
    case OE_RRR(V2CMPLES, 0, X0):
    case OE_RRR(V2CMPLES, 0, X1):
    case OE_RRR(V2CMPLEU, 0, X0):
    case OE_RRR(V2CMPLEU, 0, X1):
    case OE_RRR(V2CMPLTS, 0, X0):
    case OE_RRR(V2CMPLTS, 0, X1):
    case OE_RRR(V2CMPLTU, 0, X0):
    case OE_RRR(V2CMPLTU, 0, X1):
    case OE_RRR(V2CMPNE, 0, X0):
    case OE_RRR(V2CMPNE, 0, X1):
    case OE_RRR(V2DOTPA, 0, X0):
    case OE_RRR(V2DOTP, 0, X0):
    case OE_RRR(V2INT_H, 0, X0):
    case OE_RRR(V2INT_H, 0, X1):
    case OE_RRR(V2INT_L, 0, X0):
    case OE_RRR(V2INT_L, 0, X1):
    case OE_RRR(V2MAXS, 0, X0):
    case OE_RRR(V2MAXS, 0, X1):
    case OE_RRR(V2MINS, 0, X0):
    case OE_RRR(V2MINS, 0, X1):
    case OE_RRR(V2MNZ, 0, X0):
    case OE_RRR(V2MNZ, 0, X1):
    case OE_RRR(V2MULFSC, 0, X0):
    case OE_RRR(V2MULS, 0, X0):
    case OE_RRR(V2MULTS, 0, X0):
    case OE_RRR(V2MZ, 0, X0):
    case OE_RRR(V2MZ, 0, X1):
    case OE_RRR(V2PACKH, 0, X0):
    case OE_RRR(V2PACKH, 0, X1):
    case OE_RRR(V2PACKL, 0, X0):
    case OE_RRR(V2PACKL, 0, X1):
    case OE_RRR(V2PACKUC, 0, X0):
    case OE_RRR(V2PACKUC, 0, X1):
    case OE_RRR(V2SADAS, 0, X0):
    case OE_RRR(V2SADAU, 0, X0):
    case OE_RRR(V2SADS, 0, X0):
    case OE_RRR(V2SADU, 0, X0):
    case OE_RRR(V2SHLSC, 0, X0):
    case OE_RRR(V2SHLSC, 0, X1):
    case OE_RRR(V2SHL, 0, X0):
    case OE_RRR(V2SHL, 0, X1):
    case OE_RRR(V2SHRS, 0, X0):
    case OE_RRR(V2SHRS, 0, X1):
    case OE_RRR(V2SHRU, 0, X0):
    case OE_RRR(V2SHRU, 0, X1):
    case OE_RRR(V2SUBSC, 0, X0):
    case OE_RRR(V2SUBSC, 0, X1):
    case OE_RRR(V2SUB, 0, X0):
    case OE_RRR(V2SUB, 0, X1):
    case OE_RRR(V4ADDSC, 0, X0):
    case OE_RRR(V4ADDSC, 0, X1):
    case OE_RRR(V4ADD, 0, X0):
    case OE_RRR(V4ADD, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V4INT_H, 0, X0):
    case OE_RRR(V4INT_H, 0, X1):
        tcg_gen_shri_tl(tdest, tsrcb, 32);
        tcg_gen_deposit_tl(tdest, tsrca, tdest, 0, 32);
        mnemonic = "v4int_h";
        break;
    case OE_RRR(V4INT_L, 0, X0):
    case OE_RRR(V4INT_L, 0, X1):
        tcg_gen_deposit_tl(tdest, tsrcb, tsrca, 32, 32);
        mnemonic = "v4int_l";
        break;
    case OE_RRR(V4PACKSC, 0, X0):
    case OE_RRR(V4PACKSC, 0, X1):
    case OE_RRR(V4SHLSC, 0, X0):
    case OE_RRR(V4SHLSC, 0, X1):
    case OE_RRR(V4SHL, 0, X0):
    case OE_RRR(V4SHL, 0, X1):
    case OE_RRR(V4SHRS, 0, X0):
    case OE_RRR(V4SHRS, 0, X1):
    case OE_RRR(V4SHRU, 0, X0):
    case OE_RRR(V4SHRU, 0, X1):
    case OE_RRR(V4SUBSC, 0, X0):
    case OE_RRR(V4SUBSC, 0, X1):
    case OE_RRR(V4SUB, 0, X0):
    case OE_RRR(V4SUB, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(XOR, 0, X0):
    case OE_RRR(XOR, 0, X1):
    case OE_RRR(XOR, 5, Y0):
    case OE_RRR(XOR, 5, Y1):
        tcg_gen_xor_tl(tdest, tsrca, tsrcb);
        mnemonic = "xor";
        break;
    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %s", mnemonic,
                  reg_names[dest], reg_names[srca], reg_names[srcb]);
    return TILEGX_EXCP_NONE;
}
static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext,
                               unsigned dest, unsigned srca, int imm)
{
    TCGv tdest = dest_gr(dc, dest);
    TCGv tsrca = load_gr(dc, srca);
    const char *mnemonic;
    TCGMemOp memop;
    int i2, i3;
    TCGv t0;

    switch (opext) {
    case OE(ADDI_OPCODE_Y0, 0, Y0):
    case OE(ADDI_OPCODE_Y1, 0, Y1):
    case OE_IM(ADDI, X0):
    case OE_IM(ADDI, X1):
        tcg_gen_addi_tl(tdest, tsrca, imm);
        mnemonic = "addi";
        break;
    case OE(ADDXI_OPCODE_Y0, 0, Y0):
    case OE(ADDXI_OPCODE_Y1, 0, Y1):
    case OE_IM(ADDXI, X0):
    case OE_IM(ADDXI, X1):
        tcg_gen_addi_tl(tdest, tsrca, imm);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "addxi";
        break;
    case OE(ANDI_OPCODE_Y0, 0, Y0):
    case OE(ANDI_OPCODE_Y1, 0, Y1):
    case OE_IM(ANDI, X0):
    case OE_IM(ANDI, X1):
        tcg_gen_andi_tl(tdest, tsrca, imm);
        mnemonic = "andi";
        break;
    case OE(CMPEQI_OPCODE_Y0, 0, Y0):
    case OE(CMPEQI_OPCODE_Y1, 0, Y1):
    case OE_IM(CMPEQI, X0):
    case OE_IM(CMPEQI, X1):
        tcg_gen_setcondi_tl(TCG_COND_EQ, tdest, tsrca, imm);
        mnemonic = "cmpeqi";
        break;
    case OE(CMPLTSI_OPCODE_Y0, 0, Y0):
    case OE(CMPLTSI_OPCODE_Y1, 0, Y1):
    case OE_IM(CMPLTSI, X0):
    case OE_IM(CMPLTSI, X1):
        tcg_gen_setcondi_tl(TCG_COND_LT, tdest, tsrca, imm);
        mnemonic = "cmpltsi";
        break;
    case OE_IM(CMPLTUI, X0):
    case OE_IM(CMPLTUI, X1):
        tcg_gen_setcondi_tl(TCG_COND_LTU, tdest, tsrca, imm);
        mnemonic = "cmpltui";
        break;
    case OE_IM(LD1S_ADD, X1):
        memop = MO_SB;
        mnemonic = "ld1s_add";
        goto do_load_add;
    case OE_IM(LD1U_ADD, X1):
        memop = MO_UB;
        mnemonic = "ld1u_add";
        goto do_load_add;
    case OE_IM(LD2S_ADD, X1):
        memop = MO_TESW;
        mnemonic = "ld2s_add";
        goto do_load_add;
    case OE_IM(LD2U_ADD, X1):
        memop = MO_TEUW;
        mnemonic = "ld2u_add";
        goto do_load_add;
    case OE_IM(LD4S_ADD, X1):
        memop = MO_TESL;
        mnemonic = "ld4s_add";
        goto do_load_add;
    case OE_IM(LD4U_ADD, X1):
        memop = MO_TEUL;
        mnemonic = "ld4u_add";
        goto do_load_add;
    case OE_IM(LDNT1S_ADD, X1):
        memop = MO_SB;
        mnemonic = "ldnt1s_add";
        goto do_load_add;
    case OE_IM(LDNT1U_ADD, X1):
        memop = MO_UB;
        mnemonic = "ldnt1u_add";
        goto do_load_add;
    case OE_IM(LDNT2S_ADD, X1):
        memop = MO_TESW;
        mnemonic = "ldnt2s_add";
        goto do_load_add;
    case OE_IM(LDNT2U_ADD, X1):
        memop = MO_TEUW;
        mnemonic = "ldnt2u_add";
        goto do_load_add;
    case OE_IM(LDNT4S_ADD, X1):
        memop = MO_TESL;
        mnemonic = "ldnt4s_add";
        goto do_load_add;
    case OE_IM(LDNT4U_ADD, X1):
        memop = MO_TEUL;
        mnemonic = "ldnt4u_add";
        goto do_load_add;
    case OE_IM(LDNT_ADD, X1):
        memop = MO_TEQ;
        mnemonic = "ldnt_add";
        goto do_load_add;
    case OE_IM(LD_ADD, X1):
        memop = MO_TEQ;
        mnemonic = "ld_add";
    do_load_add:
        tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
        tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
        break;
    case OE_IM(LDNA_ADD, X1):
        tcg_gen_andi_tl(tdest, tsrca, ~7);
        tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
        tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
        mnemonic = "ldna_add";
        break;
    case OE_IM(ORI, X0):
    case OE_IM(ORI, X1):
        tcg_gen_ori_tl(tdest, tsrca, imm);
        mnemonic = "ori";
        break;
    case OE_IM(V1CMPEQI, X0):
    case OE_IM(V1CMPEQI, X1):
        tcg_gen_xori_tl(tdest, tsrca, V1_IMM(imm));
        gen_v1cmpeq0(tdest);
        mnemonic = "v1cmpeqi";
        break;
    case OE_IM(V1ADDI, X0):
    case OE_IM(V1ADDI, X1):
    case OE_IM(V1CMPLTSI, X0):
    case OE_IM(V1CMPLTSI, X1):
    case OE_IM(V1CMPLTUI, X0):
    case OE_IM(V1CMPLTUI, X1):
    case OE_IM(V1MAXUI, X0):
    case OE_IM(V1MAXUI, X1):
    case OE_IM(V1MINUI, X0):
    case OE_IM(V1MINUI, X1):
    case OE_IM(V2ADDI, X0):
    case OE_IM(V2ADDI, X1):
    case OE_IM(V2CMPEQI, X0):
    case OE_IM(V2CMPEQI, X1):
    case OE_IM(V2CMPLTSI, X0):
    case OE_IM(V2CMPLTSI, X1):
    case OE_IM(V2CMPLTUI, X0):
    case OE_IM(V2CMPLTUI, X1):
    case OE_IM(V2MAXSI, X0):
    case OE_IM(V2MAXSI, X1):
    case OE_IM(V2MINSI, X0):
    case OE_IM(V2MINSI, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_IM(XORI, X0):
    case OE_IM(XORI, X1):
        tcg_gen_xori_tl(tdest, tsrca, imm);
        mnemonic = "xori";
        break;
    case OE_SH(ROTLI, X0):
    case OE_SH(ROTLI, X1):
    case OE_SH(ROTLI, Y0):
    case OE_SH(ROTLI, Y1):
        tcg_gen_rotli_tl(tdest, tsrca, imm);
        mnemonic = "rotli";
        break;
    case OE_SH(SHLI, X0):
    case OE_SH(SHLI, X1):
    case OE_SH(SHLI, Y0):
    case OE_SH(SHLI, Y1):
        tcg_gen_shli_tl(tdest, tsrca, imm);
        mnemonic = "shli";
        break;
    case OE_SH(SHLXI, X0):
    case OE_SH(SHLXI, X1):
        tcg_gen_shli_tl(tdest, tsrca, imm & 31);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "shlxi";
        break;
    case OE_SH(SHRSI, X0):
    case OE_SH(SHRSI, X1):
    case OE_SH(SHRSI, Y0):
    case OE_SH(SHRSI, Y1):
        tcg_gen_sari_tl(tdest, tsrca, imm);
        mnemonic = "shrsi";
        break;
    case OE_SH(SHRUI, X0):
    case OE_SH(SHRUI, X1):
    case OE_SH(SHRUI, Y0):
    case OE_SH(SHRUI, Y1):
        tcg_gen_shri_tl(tdest, tsrca, imm);
        mnemonic = "shrui";
        break;
    case OE_SH(SHRUXI, X0):
    case OE_SH(SHRUXI, X1):
        if ((imm & 31) == 0) {
            tcg_gen_ext32s_tl(tdest, tsrca);
        } else {
            tcg_gen_ext32u_tl(tdest, tsrca);
            tcg_gen_shri_tl(tdest, tdest, imm & 31);
        }
        mnemonic = "shruxi";
        break;
    case OE_SH(V1SHLI, X0):
    case OE_SH(V1SHLI, X1):
        i2 = imm & 7;
        i3 = 0xff >> i2;
        tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
        tcg_gen_shli_tl(tdest, tdest, i2);
        mnemonic = "v1shli";
        break;
    case OE_SH(V1SHRSI, X0):
    case OE_SH(V1SHRSI, X1):
        t0 = tcg_const_tl(imm & 7);
        gen_helper_v1shrs(tdest, tsrca, t0);
        tcg_temp_free(t0);
        mnemonic = "v1shrsi";
        break;
    case OE_SH(V1SHRUI, X0):
    case OE_SH(V1SHRUI, X1):
        i2 = imm & 7;
        i3 = (0xff << i2) & 0xff;
        tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
        tcg_gen_shri_tl(tdest, tdest, i2);
        mnemonic = "v1shrui";
        break;
    case OE_SH(V2SHLI, X0):
    case OE_SH(V2SHLI, X1):
    case OE_SH(V2SHRSI, X0):
    case OE_SH(V2SHRSI, X1):
    case OE_SH(V2SHRUI, X0):
    case OE_SH(V2SHRUI, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE(ADDLI_OPCODE_X0, 0, X0):
    case OE(ADDLI_OPCODE_X1, 0, X1):
        tcg_gen_addi_tl(tdest, tsrca, imm);
        mnemonic = "addli";
        break;
    case OE(ADDXLI_OPCODE_X0, 0, X0):
    case OE(ADDXLI_OPCODE_X1, 0, X1):
        tcg_gen_addi_tl(tdest, tsrca, imm);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "addxli";
        break;
    case OE(SHL16INSLI_OPCODE_X0, 0, X0):
    case OE(SHL16INSLI_OPCODE_X1, 0, X1):
        tcg_gen_shli_tl(tdest, tsrca, 16);
        tcg_gen_ori_tl(tdest, tdest, imm & 0xffff);
        mnemonic = "shl16insli";
        break;

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", mnemonic,
                  reg_names[dest], reg_names[srca], imm);
    return TILEGX_EXCP_NONE;
}
static TileExcp gen_bf_opcode_x0(DisasContext *dc, unsigned ext,
                                 unsigned dest, unsigned srca,
                                 unsigned bfs, unsigned bfe)
{
    TCGv tdest = dest_gr(dc, dest);
    TCGv tsrca = load_gr(dc, srca);
    TCGv tsrcd;
    int len;
    const char *mnemonic;

    /* The bitfield is either between E and S inclusive,
       or up from S and down from E inclusive.  */
    if (bfs <= bfe) {
        len = bfe - bfs + 1;
    } else {
        len = (64 - bfs) + (bfe + 1);
    }

    switch (ext) {
    case BFEXTU_BF_OPCODE_X0:
        if (bfs == 0 && bfe == 7) {
            tcg_gen_ext8u_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 15) {
            tcg_gen_ext16u_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 31) {
            tcg_gen_ext32u_tl(tdest, tsrca);
        } else {
            int rol = 63 - bfe;
            if (bfs <= bfe) {
                tcg_gen_shli_tl(tdest, tsrca, rol);
            } else {
                tcg_gen_rotli_tl(tdest, tsrca, rol);
            }
            tcg_gen_shri_tl(tdest, tdest, (bfs + rol) & 63);
        }
        mnemonic = "bfextu";
        break;

    case BFEXTS_BF_OPCODE_X0:
        if (bfs == 0 && bfe == 7) {
            tcg_gen_ext8s_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 15) {
            tcg_gen_ext16s_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 31) {
            tcg_gen_ext32s_tl(tdest, tsrca);
        } else {
            int rol = 63 - bfe;
            if (bfs <= bfe) {
                tcg_gen_shli_tl(tdest, tsrca, rol);
            } else {
                tcg_gen_rotli_tl(tdest, tsrca, rol);
            }
            tcg_gen_sari_tl(tdest, tdest, (bfs + rol) & 63);
        }
        mnemonic = "bfexts";
        break;

    case BFINS_BF_OPCODE_X0:
        tsrcd = load_gr(dc, dest);
        if (bfs <= bfe) {
            tcg_gen_deposit_tl(tdest, tsrcd, tsrca, bfs, len);
        } else {
            tcg_gen_rotri_tl(tdest, tsrcd, bfs);
            tcg_gen_deposit_tl(tdest, tdest, tsrca, 0, len);
            tcg_gen_rotli_tl(tdest, tdest, bfs);
        }
        mnemonic = "bfins";
        break;

    case MM_BF_OPCODE_X0:
        tsrcd = load_gr(dc, dest);
        if (bfs == 0) {
            tcg_gen_deposit_tl(tdest, tsrca, tsrcd, 0, len);
        } else {
            uint64_t mask = len == 64 ? -1 : rol64((1ULL << len) - 1, bfs);
            TCGv tmp = tcg_const_tl(mask);

            tcg_gen_and_tl(tdest, tsrcd, tmp);
            tcg_gen_andc_tl(tmp, tsrca, tmp);
            tcg_gen_or_tl(tdest, tdest, tmp);
            tcg_temp_free(tmp);
        }
        mnemonic = "mm";
        break;

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %u, %u", mnemonic,
                  reg_names[dest], reg_names[srca], bfs, bfe);
    return TILEGX_EXCP_NONE;
}
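
/* Reference model of bfextu, illustration only, including the wrapped
   case where the field runs up from bit BFS and down through bit BFE.  */
static inline uint64_t bfextu_ref(uint64_t a, unsigned bfs, unsigned bfe)
{
    unsigned len = (bfe - bfs + 1) & 63;                     /* 0 means 64 */
    uint64_t rot = (a >> bfs) | (bfs ? a << (64 - bfs) : 0); /* rotr by bfs */
    return len ? rot & (((uint64_t)1 << len) - 1) : rot;
}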
static TileExcp gen_branch_opcode_x1(DisasContext *dc, unsigned ext,
                                     unsigned srca, int off)
{
    target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
    const char *mnemonic;

    dc->jmp.dest = tcg_const_tl(tgt);
    dc->jmp.val1 = tcg_temp_new();
    tcg_gen_mov_tl(dc->jmp.val1, load_gr(dc, srca));

    /* Note that the "predict taken" opcodes have bit 0 clear.
       Therefore, fold the two cases together by setting bit 0.  */
    switch (ext | 1) {
    case BEQZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_EQ;
        mnemonic = "beqz";
        break;
    case BNEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_NE;
        mnemonic = "bnez";
        break;
    case BGEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_GE;
        mnemonic = "bgez";
        break;
    case BGTZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_GT;
        mnemonic = "bgtz";
        break;
    case BLEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_LE;
        mnemonic = "blez";
        break;
    case BLTZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_LT;
        mnemonic = "bltz";
        break;
    case BLBC_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_EQ;
        tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
        mnemonic = "blbc";
        break;
    case BLBS_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_NE;
        tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
        mnemonic = "blbs";
        break;
    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("%s%s %s, " TARGET_FMT_lx " <%s>",
                 mnemonic, ext & 1 ? "" : "t",
                 reg_names[srca], tgt, lookup_symbol(tgt));
    }
    return TILEGX_EXCP_NONE;
}
static TileExcp gen_jump_opcode_x1(DisasContext *dc, unsigned ext, int off)
{
    target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
    const char *mnemonic = "j";

    /* The extension field is 1 bit, therefore we only have JAL and J.  */
    if (ext == JAL_JUMP_OPCODE_X1) {
        tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
                        dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
        mnemonic = "jal";
    }
    dc->jmp.cond = TCG_COND_ALWAYS;
    dc->jmp.dest = tcg_const_tl(tgt);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("%s " TARGET_FMT_lx " <%s>",
                 mnemonic, tgt, lookup_symbol(tgt));
    }
    return TILEGX_EXCP_NONE;
}
typedef struct {
    const char *name;
    int offset;
    void (*get)(TCGv, TCGv_ptr);
    void (*put)(TCGv_ptr, TCGv);
} TileSPR;

static const TileSPR *find_spr(unsigned spr)
{
    /* Allow the compiler to construct the binary search tree.  */
#define D(N, O, G, P) \
    case SPR_##N: { static const TileSPR x = { #N, O, G, P }; return &x; }

    switch (spr) {
    D(CMPEXCH_VALUE,
      offsetof(CPUTLGState, spregs[TILEGX_SPR_CMPEXCH]), 0, 0)
    D(INTERRUPT_CRITICAL_SECTION,
      offsetof(CPUTLGState, spregs[TILEGX_SPR_CRITICAL_SEC]), 0, 0)
    D(SIM_CONTROL,
      offsetof(CPUTLGState, spregs[TILEGX_SPR_SIM_CONTROL]), 0, 0)
    }

#undef D

    qemu_log_mask(LOG_UNIMP, "UNIMP SPR %u\n", spr);
    return NULL;
}
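
/* For illustration, the first D() entry above expands to:
       case SPR_CMPEXCH_VALUE: {
           static const TileSPR x = {
               "CMPEXCH_VALUE",
               offsetof(CPUTLGState, spregs[TILEGX_SPR_CMPEXCH]), 0, 0
           };
           return &x;
       }
   so each SPR gets one statically allocated descriptor and the switch
   itself performs the lookup.  */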
static TileExcp gen_mtspr_x1(DisasContext *dc, unsigned spr, unsigned srca)
{
    const TileSPR *def = find_spr(spr);
    TCGv tsrca;

    if (def == NULL) {
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr spr[%u], %s", spr, reg_names[srca]);
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }

    tsrca = load_gr(dc, srca);
    if (def->put) {
        def->put(cpu_env, tsrca);
    } else {
        tcg_gen_st_tl(tsrca, cpu_env, def->offset);
    }
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr %s, %s", def->name, reg_names[srca]);
    return TILEGX_EXCP_NONE;
}
static TileExcp gen_mfspr_x1(DisasContext *dc, unsigned dest, unsigned spr)
{
    const TileSPR *def = find_spr(spr);
    TCGv tdest;

    if (def == NULL) {
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "mfspr %s, spr[%u]", reg_names[dest], spr);
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }

    tdest = dest_gr(dc, dest);
    if (def->get) {
        def->get(tdest, cpu_env);
    } else {
        tcg_gen_ld_tl(tdest, cpu_env, def->offset);
    }
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "mfspr %s, %s", reg_names[dest], def->name);
    return TILEGX_EXCP_NONE;
}
static TileExcp decode_y0(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_Y0(bundle);
    unsigned ext = get_RRROpcodeExtension_Y0(bundle);
    unsigned dest = get_Dest_Y0(bundle);
    unsigned srca = get_SrcA_Y0(bundle);
    unsigned srcb;
    int imm;

    switch (opc) {
    case RRR_1_OPCODE_Y0:
        if (ext == UNARY_RRR_1_OPCODE_Y0) {
            ext = get_UnaryOpcodeExtension_Y0(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, Y0), dest, srca);
        }
        /* fallthru */
    case RRR_0_OPCODE_Y0:
    case RRR_2_OPCODE_Y0:
    case RRR_3_OPCODE_Y0:
    case RRR_4_OPCODE_Y0:
    case RRR_5_OPCODE_Y0:
    case RRR_6_OPCODE_Y0:
    case RRR_7_OPCODE_Y0:
    case RRR_8_OPCODE_Y0:
    case RRR_9_OPCODE_Y0:
        srcb = get_SrcB_Y0(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, Y0), dest, srca, srcb);

    case SHIFT_OPCODE_Y0:
        ext = get_ShiftOpcodeExtension_Y0(bundle);
        imm = get_ShAmt_Y0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, Y0), dest, srca, imm);

    case ADDI_OPCODE_Y0:
    case ADDXI_OPCODE_Y0:
    case ANDI_OPCODE_Y0:
    case CMPEQI_OPCODE_Y0:
    case CMPLTSI_OPCODE_Y0:
        imm = (int8_t)get_Imm8_Y0(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, Y0), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
static TileExcp decode_y1(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_Y1(bundle);
    unsigned ext = get_RRROpcodeExtension_Y1(bundle);
    unsigned dest = get_Dest_Y1(bundle);
    unsigned srca = get_SrcA_Y1(bundle);
    unsigned srcb;
    int imm;

    switch (get_Opcode_Y1(bundle)) {
    case RRR_1_OPCODE_Y1:
        if (ext == UNARY_RRR_1_OPCODE_Y1) {
            ext = get_UnaryOpcodeExtension_Y1(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, Y1), dest, srca);
        }
        /* fallthru */
    case RRR_0_OPCODE_Y1:
    case RRR_2_OPCODE_Y1:
    case RRR_3_OPCODE_Y1:
    case RRR_4_OPCODE_Y1:
    case RRR_5_OPCODE_Y1:
    case RRR_6_OPCODE_Y1:
    case RRR_7_OPCODE_Y1:
        srcb = get_SrcB_Y1(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, Y1), dest, srca, srcb);

    case SHIFT_OPCODE_Y1:
        ext = get_ShiftOpcodeExtension_Y1(bundle);
        imm = get_ShAmt_Y1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, Y1), dest, srca, imm);

    case ADDI_OPCODE_Y1:
    case ADDXI_OPCODE_Y1:
    case ANDI_OPCODE_Y1:
    case CMPEQI_OPCODE_Y1:
    case CMPLTSI_OPCODE_Y1:
        imm = (int8_t)get_Imm8_Y1(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, Y1), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned mode = get_Mode(bundle);
    unsigned opc = get_Opcode_Y2(bundle);
    unsigned srca = get_SrcA_Y2(bundle);
    unsigned srcbdest = get_SrcBDest_Y2(bundle);
    const char *mnemonic;
    TCGMemOp memop;

    switch (OEY2(opc, mode)) {
    case OEY2(LD1S_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_SB;
        mnemonic = "ld1s";
        goto do_load;
    case OEY2(LD1U_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_UB;
        mnemonic = "ld1u";
        goto do_load;
    case OEY2(LD2S_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_TESW;
        mnemonic = "ld2s";
        goto do_load;
    case OEY2(LD2U_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_TEUW;
        mnemonic = "ld2u";
        goto do_load;
    case OEY2(LD4S_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TESL;
        mnemonic = "ld4s";
        goto do_load;
    case OEY2(LD4U_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TEUL;
        mnemonic = "ld4u";
        goto do_load;
    case OEY2(LD_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TEQ;
        mnemonic = "ld";
    do_load:
        tcg_gen_qemu_ld_tl(dest_gr(dc, srcbdest), load_gr(dc, srca),
                           dc->mmuidx, memop);
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
                      reg_names[srcbdest], reg_names[srca]);
        return TILEGX_EXCP_NONE;

    case OEY2(ST1_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_UB, "st1");
    case OEY2(ST2_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUW, "st2");
    case OEY2(ST4_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUL, "st4");
    case OEY2(ST_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEQ, "st");

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
static TileExcp decode_x0(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_X0(bundle);
    unsigned dest = get_Dest_X0(bundle);
    unsigned srca = get_SrcA_X0(bundle);
    unsigned ext, srcb, bfs, bfe;
    int imm;

    switch (opc) {
    case RRR_0_OPCODE_X0:
        ext = get_RRROpcodeExtension_X0(bundle);
        if (ext == UNARY_RRR_0_OPCODE_X0) {
            ext = get_UnaryOpcodeExtension_X0(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, X0), dest, srca);
        }
        srcb = get_SrcB_X0(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, X0), dest, srca, srcb);

    case SHIFT_OPCODE_X0:
        ext = get_ShiftOpcodeExtension_X0(bundle);
        imm = get_ShAmt_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);

    case IMM8_OPCODE_X0:
        ext = get_Imm8OpcodeExtension_X0(bundle);
        imm = (int8_t)get_Imm8_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);

    case BF_OPCODE_X0:
        ext = get_BFOpcodeExtension_X0(bundle);
        bfs = get_BFStart_X0(bundle);
        bfe = get_BFEnd_X0(bundle);
        return gen_bf_opcode_x0(dc, ext, dest, srca, bfs, bfe);

    case ADDLI_OPCODE_X0:
    case SHL16INSLI_OPCODE_X0:
    case ADDXLI_OPCODE_X0:
        imm = (int16_t)get_Imm16_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, X0), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
static TileExcp decode_x1(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_X1(bundle);
    unsigned dest = get_Dest_X1(bundle);
    unsigned srca = get_SrcA_X1(bundle);
    unsigned ext, srcb;
    int imm;

    switch (opc) {
    case RRR_0_OPCODE_X1:
        ext = get_RRROpcodeExtension_X1(bundle);
        srcb = get_SrcB_X1(bundle);
        switch (ext) {
        case UNARY_RRR_0_OPCODE_X1:
            ext = get_UnaryOpcodeExtension_X1(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, X1), dest, srca);
        case ST1_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "st1");
        case ST2_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "st2");
        case ST4_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "st4");
        case STNT1_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "stnt1");
        case STNT2_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "stnt2");
        case STNT4_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "stnt4");
        case STNT_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "stnt");
        case ST_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "st");
        }
        return gen_rrr_opcode(dc, OE(opc, ext, X1), dest, srca, srcb);

    case SHIFT_OPCODE_X1:
        ext = get_ShiftOpcodeExtension_X1(bundle);
        imm = get_ShAmt_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);

    case IMM8_OPCODE_X1:
        ext = get_Imm8OpcodeExtension_X1(bundle);
        imm = (int8_t)get_Dest_Imm8_X1(bundle);
        srcb = get_SrcB_X1(bundle);
        switch (ext) {
        case ST1_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "st1_add");
        case ST2_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "st2_add");
        case ST4_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "st4_add");
        case STNT1_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "stnt1_add");
        case STNT2_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "stnt2_add");
        case STNT4_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "stnt4_add");
        case STNT_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "stnt_add");
        case ST_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "st_add");
        case MFSPR_IMM8_OPCODE_X1:
            return gen_mfspr_x1(dc, dest, get_MF_Imm14_X1(bundle));
        case MTSPR_IMM8_OPCODE_X1:
            return gen_mtspr_x1(dc, get_MT_Imm14_X1(bundle), srca);
        }
        imm = (int8_t)get_Imm8_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);

    case BRANCH_OPCODE_X1:
        ext = get_BrType_X1(bundle);
        imm = sextract32(get_BrOff_X1(bundle), 0, 17);
        return gen_branch_opcode_x1(dc, ext, srca, imm);

    case JUMP_OPCODE_X1:
        ext = get_JumpOpcodeExtension_X1(bundle);
        imm = sextract32(get_JumpOff_X1(bundle), 0, 27);
        return gen_jump_opcode_x1(dc, ext, imm);

    case ADDLI_OPCODE_X1:
    case SHL16INSLI_OPCODE_X1:
    case ADDXLI_OPCODE_X1:
        imm = (int16_t)get_Imm16_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, X1), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
static void notice_excp(DisasContext *dc, uint64_t bundle,
                        const char *type, TileExcp excp)
{
    if (likely(excp == TILEGX_EXCP_NONE)) {
        return;
    }
    gen_exception(dc, excp);
    if (excp == TILEGX_EXCP_OPCODE_UNIMPLEMENTED) {
        qemu_log_mask(LOG_UNIMP, "UNIMP %s, [" FMT64X "]\n", type, bundle);
    }
}
static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(dc->wb); i++) {
        DisasContextTemp *wb = &dc->wb[i];
        wb->reg = TILEGX_R_NOREG;
        TCGV_UNUSED_I64(wb->val);
    }
    dc->num_wb = 0;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, " %" PRIx64 ": { ", dc->pc);
    if (get_Mode(bundle)) {
        notice_excp(dc, bundle, "y0", decode_y0(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "y1", decode_y1(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "y2", decode_y2(dc, bundle));
    } else {
        notice_excp(dc, bundle, "x0", decode_x0(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "x1", decode_x1(dc, bundle));
    }
    qemu_log_mask(CPU_LOG_TB_IN_ASM, " }\n");

    for (i = dc->num_wb - 1; i >= 0; --i) {
        DisasContextTemp *wb = &dc->wb[i];
        if (wb->reg < TILEGX_R_COUNT) {
            tcg_gen_mov_i64(cpu_regs[wb->reg], wb->val);
        }
        tcg_temp_free_i64(wb->val);
    }

    if (dc->jmp.cond != TCG_COND_NEVER) {
        if (dc->jmp.cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(cpu_pc, dc->jmp.dest);
        } else {
            TCGv next = tcg_const_i64(dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
            tcg_gen_movcond_i64(dc->jmp.cond, cpu_pc,
                                dc->jmp.val1, load_zero(dc),
                                dc->jmp.dest, next);
            tcg_temp_free_i64(dc->jmp.val1);
            tcg_temp_free_i64(next);
        }
        tcg_temp_free_i64(dc->jmp.dest);
        tcg_gen_exit_tb(0);
        dc->exit_tb = true;
    } else if (dc->atomic_excp != TILEGX_EXCP_NONE) {
        gen_exception(dc, dc->atomic_excp);
    }
}
static inline void gen_intermediate_code_internal(TileGXCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    DisasContext ctx;
    DisasContext *dc = &ctx;
    CPUState *cs = CPU(cpu);
    CPUTLGState *env = &cpu->env;
    uint64_t pc_start = tb->pc;
    uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    int j, lj = -1;
    int num_insns = 0;
    int max_insns = tb->cflags & CF_COUNT_MASK;

    dc->pc = pc_start;
    dc->mmuidx = 0;
    dc->exit_tb = false;
    dc->atomic_excp = TILEGX_EXCP_NONE;
    dc->jmp.cond = TCG_COND_NEVER;
    TCGV_UNUSED_I64(dc->jmp.dest);
    TCGV_UNUSED_I64(dc->jmp.val1);
    TCGV_UNUSED_I64(dc->zero);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
    }
    if (!max_insns) {
        max_insns = CF_COUNT_MASK;
    }
    if (cs->singlestep_enabled || singlestep) {
        max_insns = 1;
    }
    gen_tb_start(tb);

    while (1) {
        if (search_pc) {
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        translate_one_bundle(dc, cpu_ldq_data(env, dc->pc));

        if (dc->exit_tb) {
            /* PC updated and EXIT_TB/GOTO_TB/exception emitted.  */
            break;
        }
        dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES;
        if (++num_insns >= max_insns
            || dc->pc >= next_page_start
            || tcg_op_buf_full()) {
            /* Ending the TB due to TB size or page boundary.  Set PC.  */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            tcg_gen_exit_tb(0);
            break;
        }
    }

    gen_tb_end(tb, num_insns);
    if (search_pc) {
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
}
void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUTLGState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}

void tilegx_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUTLGState, pc), "pc");
    for (i = 0; i < TILEGX_R_COUNT; i++) {
        cpu_regs[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUTLGState, regs[i]),
                                             reg_names[i]);
    }
}