/*
 * Copyright (c) 2015 Chen Gang
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/lgpl-2.1.html>
 */

#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "opcode_tilegx.h"
#include "spr_def_64.h"

#define FMT64X "%016" PRIx64

static TCGv_ptr cpu_env;
static TCGv cpu_pc;
static TCGv cpu_regs[TILEGX_R_COUNT];

static const char * const reg_names[64] = {
     "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
     "r8",  "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
    "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
    "r48", "r49", "r50", "r51",  "bp",  "tp",  "sp",  "lr",
     "sn", "idn0", "idn1", "udn0", "udn1", "udn2", "udn3", "zero"
};

/* Modified registers are cached in temporaries until the end of the bundle. */
typedef struct {
    unsigned reg;
    TCGv val;
} DisasContextTemp;

#define MAX_WRITEBACK 4

/* This is the state at translation time.  */
typedef struct {
    uint64_t pc;                /* Current pc */

    TCGv zero;                  /* For zero register */

    DisasContextTemp wb[MAX_WRITEBACK];
    int num_wb;
    int mmuidx;
    bool exit_tb;
    TileExcp atomic_excp;

    struct {
        TCGCond cond;           /* branch condition */
        TCGv dest;              /* branch destination */
        TCGv val1;              /* value to be compared against zero, for cond */
    } jmp;                      /* Jump object, only once in each TB block */
} DisasContext;

#include "exec/gen-icount.h"

/* Differentiate the various pipe encodings.  */
#define TY_X0  0
#define TY_X1  1
#define TY_Y0  2
#define TY_Y1  3

/* Remerge the base opcode and extension fields for switching.
   The X opcode fields are 3 bits; Y0/Y1 opcode fields are 4 bits;
   Y2 opcode field is 2 bits.  */
#define OE(OP, EXT, XY)   (TY_##XY + OP * 4 + EXT * 64)

/* Similar, but for Y2 only.  */
#define OEY2(OP, MODE)    (OP + MODE * 4)
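
/*
 * Illustrative example of the packing (operand values chosen
 * arbitrarily): OE(3, 5, X1) = TY_X1 + 3 * 4 + 5 * 64 = 1 + 12 + 320
 * = 333.  The pipe tag occupies bits [1:0], the base opcode bits
 * [5:2], and the extension the bits above, so each (opcode, extension,
 * pipe) triple yields a distinct switch value.
 */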

/* Similar, but make sure opcode names match up.  */
#define OE_RR_X0(E)    OE(RRR_0_OPCODE_X0, E##_UNARY_OPCODE_X0, X0)
#define OE_RR_X1(E)    OE(RRR_0_OPCODE_X1, E##_UNARY_OPCODE_X1, X1)
#define OE_RR_Y0(E)    OE(RRR_1_OPCODE_Y0, E##_UNARY_OPCODE_Y0, Y0)
#define OE_RR_Y1(E)    OE(RRR_1_OPCODE_Y1, E##_UNARY_OPCODE_Y1, Y1)
#define OE_RRR(E,N,XY) OE(RRR_##N##_OPCODE_##XY, E##_RRR_##N##_OPCODE_##XY, XY)
#define OE_IM(E,XY)    OE(IMM8_OPCODE_##XY, E##_IMM8_OPCODE_##XY, XY)
#define OE_SH(E,XY)    OE(SHIFT_OPCODE_##XY, E##_SHIFT_OPCODE_##XY, XY)

#define V1_IMM(X)      (((X) & 0xff) * 0x0101010101010101ull)
#define V2_IMM(X)      (((X) & 0xffff) * 0x0001000100010001ull)
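
/*
 * Illustrative example: V1_IMM(0x80) replicates its byte operand into
 * all eight byte lanes, 0x80 * 0x0101010101010101 = 0x8080808080808080;
 * V2_IMM(0x8000) likewise yields 0x8000800080008000 across the four
 * 16-bit lanes.
 */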

static void gen_exception(DisasContext *dc, TileExcp num)
{
    TCGv_i32 tmp;

    tcg_gen_movi_tl(cpu_pc, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);

    tmp = tcg_const_i32(num);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static bool check_gr(DisasContext *dc, uint8_t reg)
{
    if (likely(reg < TILEGX_R_COUNT)) {
        return true;
    }

    switch (reg) {
    case TILEGX_R_SN:
    case TILEGX_R_ZERO:
        break;
    case TILEGX_R_IDN0:
    case TILEGX_R_IDN1:
        gen_exception(dc, TILEGX_EXCP_REG_IDN_ACCESS);
        break;
    case TILEGX_R_UDN0:
    case TILEGX_R_UDN1:
    case TILEGX_R_UDN2:
    case TILEGX_R_UDN3:
        gen_exception(dc, TILEGX_EXCP_REG_UDN_ACCESS);
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

static TCGv load_zero(DisasContext *dc)
{
    if (TCGV_IS_UNUSED_I64(dc->zero)) {
        dc->zero = tcg_const_i64(0);
    }
    return dc->zero;
}

static TCGv load_gr(DisasContext *dc, unsigned reg)
{
    if (check_gr(dc, reg)) {
        return cpu_regs[reg];
    }
    return load_zero(dc);
}

static TCGv dest_gr(DisasContext *dc, unsigned reg)
{
    int n;

    /* Skip the result, mark the exception if necessary, and continue */
    check_gr(dc, reg);

    n = dc->num_wb++;
    dc->wb[n].reg = reg;
    return dc->wb[n].val = tcg_temp_new_i64();
}

static void gen_saturate_op(TCGv tdest, TCGv tsrca, TCGv tsrcb,
                            void (*operate)(TCGv, TCGv, TCGv))
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32s_tl(tdest, tsrca);
    tcg_gen_ext32s_tl(t0, tsrcb);
    operate(tdest, tdest, t0);

    tcg_gen_movi_tl(t0, 0x7fffffff);
    tcg_gen_movcond_tl(TCG_COND_GT, tdest, tdest, t0, t0, tdest);
    tcg_gen_movi_tl(t0, -0x80000000LL);
    tcg_gen_movcond_tl(TCG_COND_LT, tdest, tdest, t0, t0, tdest);

    tcg_temp_free(t0);
}
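
/*
 * Illustrative example: an addxsc of 0x7fffffff and 1 computes the
 * 64-bit sum 0x80000000 of the sign-extended 32-bit operands; that is
 * greater than 0x7fffffff, so the first movcond above clamps the
 * result to 0x7fffffff.
 */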

static void gen_atomic_excp(DisasContext *dc, unsigned dest, TCGv tdest,
                            TCGv tsrca, TCGv tsrcb, TileExcp excp)
{
#ifdef CONFIG_USER_ONLY
    TCGv_i32 t;

    tcg_gen_st_tl(tsrca, cpu_env, offsetof(CPUTLGState, atomic_srca));
    tcg_gen_st_tl(tsrcb, cpu_env, offsetof(CPUTLGState, atomic_srcb));
    t = tcg_const_i32(dest);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUTLGState, atomic_dstr));
    tcg_temp_free_i32(t);

    /* We're going to write the real result in the exception.  But in
       the meantime we've already created a writeback register, and
       we don't want that to remain uninitialized.  */
    tcg_gen_movi_tl(tdest, 0);

    /* Note that we need to delay issuing the exception that implements
       the atomic operation until after writing back the results of the
       instruction occupying the X0 pipe.  */
    dc->atomic_excp = excp;
#else
    gen_exception(dc, TILEGX_EXCP_OPCODE_UNIMPLEMENTED);
#endif
}

/* Shift the 128-bit value TSRCA:TSRCD right by the number of bytes
   specified by the bottom 3 bits of TSRCB, and set TDEST to the
   low 64 bits of the resulting value.  */
static void gen_dblalign(TCGv tdest, TCGv tsrcd, TCGv tsrca, TCGv tsrcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_andi_tl(t0, tsrcb, 7);
    tcg_gen_shli_tl(t0, t0, 3);
    tcg_gen_shr_tl(tdest, tsrcd, t0);

    /* We want to do "t0 = tsrca << (64 - t0)".  Two's complement
       arithmetic on a 6-bit field tells us that 64 - t0 is equal
       to (t0 ^ 63) + 1.  So we can do the shift in two parts,
       neither of which will be an invalid shift by 64.  */
    tcg_gen_xori_tl(t0, t0, 63);
    tcg_gen_shl_tl(t0, tsrca, t0);
    tcg_gen_shli_tl(t0, t0, 1);
    tcg_gen_or_tl(tdest, tdest, t0);

    tcg_temp_free(t0);
}
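
/*
 * Illustrative example of the identity used above: for a one-byte
 * shift, t0 = 8 after the multiply by 8, and (8 ^ 63) + 1 = 55 + 1 =
 * 56 = 64 - 8.  The final "+ 1" is supplied by the separate shli by 1,
 * so no single TCG shift ever uses the invalid amount 64 (which would
 * arise for t0 = 0).
 */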

/* Similarly, except that the 128-bit value is TSRCA:TSRCB, and the
   right shift is an immediate.  */
static void gen_dblaligni(TCGv tdest, TCGv tsrca, TCGv tsrcb, int shr)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_shri_tl(t0, tsrcb, shr);
    tcg_gen_shli_tl(tdest, tsrca, 64 - shr);
    tcg_gen_or_tl(tdest, tdest, t0);

    tcg_temp_free(t0);
}

/* Select the low or high half of each 64-bit source, zero- or
   sign-extended, as an operand for the multiply instructions.  */
typedef enum { LU, LS, HU, HS } MulHalf;

static void gen_ext_half(TCGv d, TCGv s, MulHalf h)
{
    switch (h) {
    case LU:
        tcg_gen_ext32u_tl(d, s);
        break;
    case LS:
        tcg_gen_ext32s_tl(d, s);
        break;
    case HU:
        tcg_gen_shri_tl(d, s, 32);
        break;
    case HS:
        tcg_gen_sari_tl(d, s, 32);
        break;
    }
}

static void gen_mul_half(TCGv tdest, TCGv tsrca, TCGv tsrcb,
                         MulHalf ha, MulHalf hb)
{
    TCGv t = tcg_temp_new();
    gen_ext_half(t, tsrca, ha);
    gen_ext_half(tdest, tsrcb, hb);
    tcg_gen_mul_tl(tdest, tdest, t);
    tcg_temp_free(t);
}
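
/*
 * Illustrative example: MUL_HS_LS reaches gen_mul_half with ha = HS
 * and hb = LS, so the extracted operands are sext(srca[63:32]) and
 * sext(srcb[31:0]), and the full 64-bit product of those two 32-bit
 * halves is written to the destination.
 */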

static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca,
                              unsigned srcb, TCGMemOp memop, const char *name)
{
    if (dest) {
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    tcg_gen_qemu_st_tl(load_gr(dc, srcb), load_gr(dc, srca),
                       dc->mmuidx, memop);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", name,
                  reg_names[srca], reg_names[srcb]);
    return TILEGX_EXCP_NONE;
}

static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca,
                                  unsigned srcb, int imm,
                                  TCGMemOp memop, const char *name)
{
    TCGv tsrca = load_gr(dc, srca);
    TCGv tsrcb = load_gr(dc, srcb);

    tcg_gen_qemu_st_tl(tsrcb, tsrca, dc->mmuidx, memop);
    tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", name,
                  reg_names[srca], reg_names[srcb], imm);
    return TILEGX_EXCP_NONE;
}

/* Equality comparison with zero can be done quickly and efficiently.  */
static void gen_v1cmpeq0(TCGv v)
{
    TCGv m = tcg_const_tl(V1_IMM(0x7f));
    TCGv c = tcg_temp_new();

    /* ~(((v & m) + m) | m | v).  Sets the msb for each byte == 0.  */
    tcg_gen_and_tl(c, v, m);
    tcg_gen_add_tl(c, c, m);
    tcg_gen_or_tl(c, c, m);
    tcg_gen_nor_tl(c, c, v);
    tcg_temp_free(m);

    /* Shift the msb down to form the lsb boolean result.  */
    tcg_gen_shri_tl(v, c, 7);
    tcg_temp_free(c);
}
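
/*
 * Illustrative example of the byte == 0 test, one lane with m = 0x7f:
 *   v = 0x00: ~(((0x00 + 0x7f) | 0x7f) | 0x00) = 0x80  (msb set)
 *   v = 0x80: ~(((0x00 + 0x7f) | 0x7f) | 0x80) = 0x00  (msb clear)
 *   v = 0x01: ~(((0x01 + 0x7f) | 0x7f) | 0x01) = 0x00  (msb clear)
 * The final shift by 7 moves each lane's msb into that lane's lsb.
 */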

static void gen_v1cmpne0(TCGv v)
{
    TCGv m = tcg_const_tl(V1_IMM(0x7f));
    TCGv c = tcg_temp_new();

    /* (((v & m) + m) | v) & ~m.  Sets the msb for each byte != 0.  */
    tcg_gen_and_tl(c, v, m);
    tcg_gen_add_tl(c, c, m);
    tcg_gen_or_tl(c, c, v);
    tcg_gen_andc_tl(c, c, m);
    tcg_temp_free(m);

    /* Shift the msb down to form the lsb boolean result.  */
    tcg_gen_shri_tl(v, c, 7);
    tcg_temp_free(c);
}

/* Vector addition can be performed via arithmetic plus masking.  It is
   efficient this way only for 4 or more elements.  */
static void gen_v12add(TCGv tdest, TCGv tsrca, TCGv tsrcb, uint64_t sign)
{
    TCGv tmask = tcg_const_tl(~sign);
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();

    /* ((a & ~sign) + (b & ~sign)) ^ ((a ^ b) & sign).  */
    tcg_gen_and_tl(t0, tsrca, tmask);
    tcg_gen_and_tl(t1, tsrcb, tmask);
    tcg_gen_add_tl(tdest, t0, t1);
    tcg_gen_xor_tl(t0, tsrca, tsrcb);
    tcg_gen_andc_tl(t0, t0, tmask);
    tcg_gen_xor_tl(tdest, tdest, t0);

    tcg_temp_free(t1);
    tcg_temp_free(t0);
    tcg_temp_free(tmask);
}
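
/*
 * Illustrative example, byte lanes with sign = V1_IMM(0x80): adding
 * 0xff and 0x01 in one lane computes (0x7f + 0x01) = 0x80 without a
 * carry escaping into the neighbouring lane, then the correction term
 * (0xff ^ 0x01) & 0x80 = 0x80 restores the msb, giving the correct
 * modulo-256 sum 0x00.
 */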

/* Similarly for vector subtraction.  */
static void gen_v12sub(TCGv tdest, TCGv tsrca, TCGv tsrcb, uint64_t sign)
{
    TCGv tsign = tcg_const_tl(sign);
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();

    /* ((a | sign) - (b & ~sign)) ^ ((a ^ ~b) & sign).  */
    tcg_gen_or_tl(t0, tsrca, tsign);
    tcg_gen_andc_tl(t1, tsrcb, tsign);
    tcg_gen_sub_tl(tdest, t0, t1);
    tcg_gen_eqv_tl(t0, tsrca, tsrcb);
    tcg_gen_and_tl(t0, t0, tsign);
    tcg_gen_xor_tl(tdest, tdest, t0);

    tcg_temp_free(t1);
    tcg_temp_free(t0);
    tcg_temp_free(tsign);
}
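
/*
 * Illustrative example, one byte lane with sign = 0x80: 0x00 - 0x01
 * evaluates (0x00 | 0x80) - (0x01 & 0x7f) = 0x7f, and the correction
 * term ~(0x00 ^ 0x01) & 0x80 = 0x80 flips the msb to give the correct
 * wrapped difference 0xff.  OR-ing the sign bit into the minuend
 * guarantees the per-lane subtraction never borrows from the lane
 * above.
 */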

static void gen_v4sh(TCGv d64, TCGv a64, TCGv b64,
                     void (*generate)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 al = tcg_temp_new_i32();
    TCGv_i32 ah = tcg_temp_new_i32();
    TCGv_i32 bl = tcg_temp_new_i32();

    tcg_gen_extr_i64_i32(al, ah, a64);
    tcg_gen_extrl_i64_i32(bl, b64);
    tcg_gen_andi_i32(bl, bl, 31);
    generate(al, al, bl);
    generate(ah, ah, bl);
    tcg_gen_concat_i32_i64(d64, al, ah);

    tcg_temp_free_i32(al);
    tcg_temp_free_i32(ah);
    tcg_temp_free_i32(bl);
}

static void gen_v4op(TCGv d64, TCGv a64, TCGv b64,
                     void (*generate)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 al = tcg_temp_new_i32();
    TCGv_i32 ah = tcg_temp_new_i32();
    TCGv_i32 bl = tcg_temp_new_i32();
    TCGv_i32 bh = tcg_temp_new_i32();

    tcg_gen_extr_i64_i32(al, ah, a64);
    tcg_gen_extr_i64_i32(bl, bh, b64);
    generate(al, al, bl);
    generate(ah, ah, bh);
    tcg_gen_concat_i32_i64(d64, al, ah);

    tcg_temp_free_i32(al);
    tcg_temp_free_i32(ah);
    tcg_temp_free_i32(bl);
    tcg_temp_free_i32(bh);
}

static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext,
                              unsigned dest, unsigned srca)
{
    TCGv tdest, tsrca;
    const char *mnemonic;
    TCGMemOp memop;
    TileExcp ret = TILEGX_EXCP_NONE;

    /* Eliminate instructions with no output before doing anything else.  */
    switch (opext) {
    case OE_RR_X1(DRAIN):
        mnemonic = "drain";
        goto done0;
    case OE_RR_X1(FLUSHWB):
        mnemonic = "flushwb";
        goto done0;
    case OE_RR_X1(ILL):
    case OE_RR_Y1(ILL):
        mnemonic = (dest == 0x1c && srca == 0x25 ? "bpt" : "ill");
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    case OE_RR_X1(NAP):
        /* ??? This should yield, especially in system mode.  */
        mnemonic = "nap";
        goto done0;
    case OE_RR_X1(SWINT0):
    case OE_RR_X1(SWINT2):
    case OE_RR_X1(SWINT3):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RR_X1(SWINT1):
        ret = TILEGX_EXCP_SYSCALL;
        mnemonic = "swint1";
    done0:
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
        return ret;

    /* Instructions that consume one register but write none.  */
    case OE_RR_X1(DTLBPR):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RR_X1(FLUSH):
        mnemonic = "flush";
        goto done1;
    case OE_RR_X1(JALRP):
    case OE_RR_Y1(JALRP):
        mnemonic = "jalrp";
        tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
                        dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
        dc->jmp.cond = TCG_COND_ALWAYS;
        dc->jmp.dest = tcg_temp_new();
        tcg_gen_andi_tl(dc->jmp.dest, load_gr(dc, srca), ~7);
    done1:
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s", mnemonic, reg_names[srca]);
        return ret;
    }

    tdest = dest_gr(dc, dest);
    tsrca = load_gr(dc, srca);

    switch (opext) {
    case OE_RR_X0(CNTLZ):
    case OE_RR_Y0(CNTLZ):
        gen_helper_cntlz(tdest, tsrca);
        mnemonic = "cntlz";
        break;
    case OE_RR_X0(CNTTZ):
    case OE_RR_Y0(CNTTZ):
        gen_helper_cnttz(tdest, tsrca);
        mnemonic = "cnttz";
        break;
    case OE_RR_X0(FSINGLE_PACK1):
    case OE_RR_Y0(FSINGLE_PACK1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RR_X1(LDNT1S):
        memop = MO_SB;
        mnemonic = "ldnt1s";
        goto do_load;
    case OE_RR_X1(LDNT1U):
        memop = MO_UB;
        mnemonic = "ldnt1u";
        goto do_load;
    case OE_RR_X1(LDNT2S):
        memop = MO_TESW;
        mnemonic = "ldnt2s";
        goto do_load;
    case OE_RR_X1(LDNT2U):
        memop = MO_TEUW;
        mnemonic = "ldnt2u";
        goto do_load;
    case OE_RR_X1(LDNT4S):
        memop = MO_TESL;
        mnemonic = "ldnt4s";
        goto do_load;
    case OE_RR_X1(LDNT4U):
        memop = MO_TEUL;
        mnemonic = "ldnt4u";
        /* fall through */
    do_load:
        tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
        break;
    case OE_RR_X1(LDNA):
        tcg_gen_andi_tl(tdest, tsrca, ~7);
        tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
        mnemonic = "ldna";
        break;
    case OE_RR_X1(LNK):
    case OE_RR_Y1(LNK):
        if (srca) {
            return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
        }
        tcg_gen_movi_tl(tdest, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
        mnemonic = "lnk";
        break;
    case OE_RR_X0(PCNT):
    case OE_RR_Y0(PCNT):
        gen_helper_pcnt(tdest, tsrca);
        mnemonic = "pcnt";
        break;
    case OE_RR_X0(REVBITS):
    case OE_RR_Y0(REVBITS):
        gen_helper_revbits(tdest, tsrca);
        mnemonic = "revbits";
        break;
    case OE_RR_X0(REVBYTES):
    case OE_RR_Y0(REVBYTES):
        tcg_gen_bswap64_tl(tdest, tsrca);
        mnemonic = "revbytes";
        break;
    case OE_RR_X0(TBLIDXB0):
    case OE_RR_Y0(TBLIDXB0):
    case OE_RR_X0(TBLIDXB1):
    case OE_RR_Y0(TBLIDXB1):
    case OE_RR_X0(TBLIDXB2):
    case OE_RR_Y0(TBLIDXB2):
    case OE_RR_X0(TBLIDXB3):
    case OE_RR_Y0(TBLIDXB3):
    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
                  reg_names[dest], reg_names[srca]);
    return ret;
}

static TileExcp gen_rrr_opcode(DisasContext *dc, unsigned opext,
                               unsigned dest, unsigned srca, unsigned srcb)
{
    TCGv tdest = dest_gr(dc, dest);
    TCGv tsrca = load_gr(dc, srca);
    TCGv tsrcb = load_gr(dc, srcb);
    TCGv t0;
    const char *mnemonic;

    switch (opext) {
    case OE_RRR(ADDXSC, 0, X0):
    case OE_RRR(ADDXSC, 0, X1):
        gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_add_tl);
        mnemonic = "addxsc";
        break;
    case OE_RRR(ADDX, 0, X0):
    case OE_RRR(ADDX, 0, X1):
    case OE_RRR(ADDX, 0, Y0):
    case OE_RRR(ADDX, 0, Y1):
        tcg_gen_add_tl(tdest, tsrca, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "addx";
        break;
    case OE_RRR(ADD, 0, X0):
    case OE_RRR(ADD, 0, X1):
    case OE_RRR(ADD, 0, Y0):
    case OE_RRR(ADD, 0, Y1):
        tcg_gen_add_tl(tdest, tsrca, tsrcb);
        mnemonic = "add";
        break;
    case OE_RRR(AND, 0, X0):
    case OE_RRR(AND, 0, X1):
    case OE_RRR(AND, 5, Y0):
    case OE_RRR(AND, 5, Y1):
        tcg_gen_and_tl(tdest, tsrca, tsrcb);
        mnemonic = "and";
        break;
    case OE_RRR(CMOVEQZ, 0, X0):
    case OE_RRR(CMOVEQZ, 4, Y0):
        tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, load_zero(dc),
                           tsrcb, load_gr(dc, dest));
        mnemonic = "cmoveqz";
        break;
    case OE_RRR(CMOVNEZ, 0, X0):
    case OE_RRR(CMOVNEZ, 4, Y0):
        tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, load_zero(dc),
                           tsrcb, load_gr(dc, dest));
        mnemonic = "cmovnez";
        break;
    case OE_RRR(CMPEQ, 0, X0):
    case OE_RRR(CMPEQ, 0, X1):
    case OE_RRR(CMPEQ, 3, Y0):
    case OE_RRR(CMPEQ, 3, Y1):
        tcg_gen_setcond_tl(TCG_COND_EQ, tdest, tsrca, tsrcb);
        mnemonic = "cmpeq";
        break;
    case OE_RRR(CMPEXCH4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_CMPEXCH4);
        mnemonic = "cmpexch4";
        break;
    case OE_RRR(CMPEXCH, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_CMPEXCH);
        mnemonic = "cmpexch";
        break;
    case OE_RRR(CMPLES, 0, X0):
    case OE_RRR(CMPLES, 0, X1):
    case OE_RRR(CMPLES, 2, Y0):
    case OE_RRR(CMPLES, 2, Y1):
        tcg_gen_setcond_tl(TCG_COND_LE, tdest, tsrca, tsrcb);
        mnemonic = "cmples";
        break;
    case OE_RRR(CMPLEU, 0, X0):
    case OE_RRR(CMPLEU, 0, X1):
    case OE_RRR(CMPLEU, 2, Y0):
    case OE_RRR(CMPLEU, 2, Y1):
        tcg_gen_setcond_tl(TCG_COND_LEU, tdest, tsrca, tsrcb);
        mnemonic = "cmpleu";
        break;
    case OE_RRR(CMPLTS, 0, X0):
    case OE_RRR(CMPLTS, 0, X1):
    case OE_RRR(CMPLTS, 2, Y0):
    case OE_RRR(CMPLTS, 2, Y1):
        tcg_gen_setcond_tl(TCG_COND_LT, tdest, tsrca, tsrcb);
        mnemonic = "cmplts";
        break;
    case OE_RRR(CMPLTU, 0, X0):
    case OE_RRR(CMPLTU, 0, X1):
    case OE_RRR(CMPLTU, 2, Y0):
    case OE_RRR(CMPLTU, 2, Y1):
        tcg_gen_setcond_tl(TCG_COND_LTU, tdest, tsrca, tsrcb);
        mnemonic = "cmpltu";
        break;
    case OE_RRR(CMPNE, 0, X0):
    case OE_RRR(CMPNE, 0, X1):
    case OE_RRR(CMPNE, 3, Y0):
    case OE_RRR(CMPNE, 3, Y1):
        tcg_gen_setcond_tl(TCG_COND_NE, tdest, tsrca, tsrcb);
        mnemonic = "cmpne";
        break;
    case OE_RRR(CMULAF, 0, X0):
    case OE_RRR(CMULA, 0, X0):
    case OE_RRR(CMULFR, 0, X0):
    case OE_RRR(CMULF, 0, X0):
    case OE_RRR(CMULHR, 0, X0):
    case OE_RRR(CMULH, 0, X0):
    case OE_RRR(CMUL, 0, X0):
    case OE_RRR(CRC32_32, 0, X0):
    case OE_RRR(CRC32_8, 0, X0):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(DBLALIGN2, 0, X0):
    case OE_RRR(DBLALIGN2, 0, X1):
        gen_dblaligni(tdest, tsrca, tsrcb, 16);
        mnemonic = "dblalign2";
        break;
    case OE_RRR(DBLALIGN4, 0, X0):
    case OE_RRR(DBLALIGN4, 0, X1):
        gen_dblaligni(tdest, tsrca, tsrcb, 32);
        mnemonic = "dblalign4";
        break;
    case OE_RRR(DBLALIGN6, 0, X0):
    case OE_RRR(DBLALIGN6, 0, X1):
        gen_dblaligni(tdest, tsrca, tsrcb, 48);
        mnemonic = "dblalign6";
        break;
    case OE_RRR(DBLALIGN, 0, X0):
        gen_dblalign(tdest, load_gr(dc, dest), tsrca, tsrcb);
        mnemonic = "dblalign";
        break;
    case OE_RRR(EXCH4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_EXCH4);
        mnemonic = "exch4";
        break;
    case OE_RRR(EXCH, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_EXCH);
        mnemonic = "exch";
        break;
    case OE_RRR(FDOUBLE_ADDSUB, 0, X0):
    case OE_RRR(FDOUBLE_ADD_FLAGS, 0, X0):
    case OE_RRR(FDOUBLE_MUL_FLAGS, 0, X0):
    case OE_RRR(FDOUBLE_PACK1, 0, X0):
    case OE_RRR(FDOUBLE_PACK2, 0, X0):
    case OE_RRR(FDOUBLE_SUB_FLAGS, 0, X0):
    case OE_RRR(FDOUBLE_UNPACK_MAX, 0, X0):
    case OE_RRR(FDOUBLE_UNPACK_MIN, 0, X0):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(FETCHADD4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHADD4);
        mnemonic = "fetchadd4";
        break;
    case OE_RRR(FETCHADDGEZ4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHADDGEZ4);
        mnemonic = "fetchaddgez4";
        break;
    case OE_RRR(FETCHADDGEZ, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHADDGEZ);
        mnemonic = "fetchaddgez";
        break;
    case OE_RRR(FETCHADD, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHADD);
        mnemonic = "fetchadd";
        break;
    case OE_RRR(FETCHAND4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHAND4);
        mnemonic = "fetchand4";
        break;
    case OE_RRR(FETCHAND, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHAND);
        mnemonic = "fetchand";
        break;
    case OE_RRR(FETCHOR4, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHOR4);
        mnemonic = "fetchor4";
        break;
    case OE_RRR(FETCHOR, 0, X1):
        gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
                        TILEGX_EXCP_OPCODE_FETCHOR);
        mnemonic = "fetchor";
        break;
    case OE_RRR(FSINGLE_ADD1, 0, X0):
    case OE_RRR(FSINGLE_ADDSUB2, 0, X0):
    case OE_RRR(FSINGLE_MUL1, 0, X0):
    case OE_RRR(FSINGLE_MUL2, 0, X0):
    case OE_RRR(FSINGLE_PACK2, 0, X0):
    case OE_RRR(FSINGLE_SUB1, 0, X0):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(MNZ, 0, X0):
    case OE_RRR(MNZ, 0, X1):
    case OE_RRR(MNZ, 4, Y0):
    case OE_RRR(MNZ, 4, Y1):
        t0 = load_zero(dc);
        tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, t0, tsrcb, t0);
        mnemonic = "mnz";
        break;
    case OE_RRR(MULAX, 0, X0):
    case OE_RRR(MULAX, 3, Y0):
        tcg_gen_mul_tl(tdest, tsrca, tsrcb);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "mulax";
        break;
    case OE_RRR(MULA_HS_HS, 0, X0):
    case OE_RRR(MULA_HS_HS, 9, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, HS);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hs_hs";
        break;
    case OE_RRR(MULA_HS_HU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, HU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hs_hu";
        break;
    case OE_RRR(MULA_HS_LS, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, LS);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hs_ls";
        break;
    case OE_RRR(MULA_HS_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, LU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hs_lu";
        break;
    case OE_RRR(MULA_HU_HU, 0, X0):
    case OE_RRR(MULA_HU_HU, 9, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, HU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hu_hu";
        break;
    case OE_RRR(MULA_HU_LS, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, LS);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hu_ls";
        break;
    case OE_RRR(MULA_HU_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, LU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_hu_lu";
        break;
    case OE_RRR(MULA_LS_LS, 0, X0):
    case OE_RRR(MULA_LS_LS, 9, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, LS, LS);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_ls_ls";
        break;
    case OE_RRR(MULA_LS_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, LS, LU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_ls_lu";
        break;
    case OE_RRR(MULA_LU_LU, 0, X0):
    case OE_RRR(MULA_LU_LU, 9, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, LU, LU);
        tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
        mnemonic = "mula_lu_lu";
        break;
    case OE_RRR(MULX, 0, X0):
    case OE_RRR(MULX, 3, Y0):
        tcg_gen_mul_tl(tdest, tsrca, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "mulx";
        break;
    case OE_RRR(MUL_HS_HS, 0, X0):
    case OE_RRR(MUL_HS_HS, 8, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, HS);
        mnemonic = "mul_hs_hs";
        break;
    case OE_RRR(MUL_HS_HU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, HU);
        mnemonic = "mul_hs_hu";
        break;
    case OE_RRR(MUL_HS_LS, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, LS);
        mnemonic = "mul_hs_ls";
        break;
    case OE_RRR(MUL_HS_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HS, LU);
        mnemonic = "mul_hs_lu";
        break;
    case OE_RRR(MUL_HU_HU, 0, X0):
    case OE_RRR(MUL_HU_HU, 8, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, HU);
        mnemonic = "mul_hu_hu";
        break;
    case OE_RRR(MUL_HU_LS, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, LS);
        mnemonic = "mul_hu_ls";
        break;
    case OE_RRR(MUL_HU_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, HU, LU);
        mnemonic = "mul_hu_lu";
        break;
    case OE_RRR(MUL_LS_LS, 0, X0):
    case OE_RRR(MUL_LS_LS, 8, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, LS, LS);
        mnemonic = "mul_ls_ls";
        break;
    case OE_RRR(MUL_LS_LU, 0, X0):
        gen_mul_half(tdest, tsrca, tsrcb, LS, LU);
        mnemonic = "mul_ls_lu";
        break;
    case OE_RRR(MUL_LU_LU, 0, X0):
    case OE_RRR(MUL_LU_LU, 8, Y0):
        gen_mul_half(tdest, tsrca, tsrcb, LU, LU);
        mnemonic = "mul_lu_lu";
        break;
    case OE_RRR(MZ, 0, X0):
    case OE_RRR(MZ, 0, X1):
    case OE_RRR(MZ, 4, Y0):
    case OE_RRR(MZ, 4, Y1):
        t0 = load_zero(dc);
        tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, t0, tsrcb, t0);
        mnemonic = "mz";
        break;
    case OE_RRR(NOR, 0, X0):
    case OE_RRR(NOR, 0, X1):
    case OE_RRR(NOR, 5, Y0):
    case OE_RRR(NOR, 5, Y1):
        tcg_gen_nor_tl(tdest, tsrca, tsrcb);
        mnemonic = "nor";
        break;
    case OE_RRR(OR, 0, X0):
    case OE_RRR(OR, 0, X1):
    case OE_RRR(OR, 5, Y0):
    case OE_RRR(OR, 5, Y1):
        tcg_gen_or_tl(tdest, tsrca, tsrcb);
        mnemonic = "or";
        break;
    case OE_RRR(ROTL, 0, X0):
    case OE_RRR(ROTL, 0, X1):
    case OE_RRR(ROTL, 6, Y0):
    case OE_RRR(ROTL, 6, Y1):
        tcg_gen_andi_tl(tdest, tsrcb, 63);
        tcg_gen_rotl_tl(tdest, tsrca, tdest);
        mnemonic = "rotl";
        break;
    case OE_RRR(SHL1ADDX, 0, X0):
    case OE_RRR(SHL1ADDX, 0, X1):
    case OE_RRR(SHL1ADDX, 7, Y0):
    case OE_RRR(SHL1ADDX, 7, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 1);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "shl1addx";
        break;
    case OE_RRR(SHL1ADD, 0, X0):
    case OE_RRR(SHL1ADD, 0, X1):
    case OE_RRR(SHL1ADD, 1, Y0):
    case OE_RRR(SHL1ADD, 1, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 1);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        mnemonic = "shl1add";
        break;
    case OE_RRR(SHL2ADDX, 0, X0):
    case OE_RRR(SHL2ADDX, 0, X1):
    case OE_RRR(SHL2ADDX, 7, Y0):
    case OE_RRR(SHL2ADDX, 7, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 2);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "shl2addx";
        break;
    case OE_RRR(SHL2ADD, 0, X0):
    case OE_RRR(SHL2ADD, 0, X1):
    case OE_RRR(SHL2ADD, 1, Y0):
    case OE_RRR(SHL2ADD, 1, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 2);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        mnemonic = "shl2add";
        break;
    case OE_RRR(SHL3ADDX, 0, X0):
    case OE_RRR(SHL3ADDX, 0, X1):
    case OE_RRR(SHL3ADDX, 7, Y0):
    case OE_RRR(SHL3ADDX, 7, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 3);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "shl3addx";
        break;
    case OE_RRR(SHL3ADD, 0, X0):
    case OE_RRR(SHL3ADD, 0, X1):
    case OE_RRR(SHL3ADD, 1, Y0):
    case OE_RRR(SHL3ADD, 1, Y1):
        tcg_gen_shli_tl(tdest, tsrca, 3);
        tcg_gen_add_tl(tdest, tdest, tsrcb);
        mnemonic = "shl3add";
        break;
    case OE_RRR(SHLX, 0, X0):
    case OE_RRR(SHLX, 0, X1):
        tcg_gen_andi_tl(tdest, tsrcb, 31);
        tcg_gen_shl_tl(tdest, tsrca, tdest);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "shlx";
        break;
    case OE_RRR(SHL, 0, X0):
    case OE_RRR(SHL, 0, X1):
    case OE_RRR(SHL, 6, Y0):
    case OE_RRR(SHL, 6, Y1):
        tcg_gen_andi_tl(tdest, tsrcb, 63);
        tcg_gen_shl_tl(tdest, tsrca, tdest);
        mnemonic = "shl";
        break;
    case OE_RRR(SHRS, 0, X0):
    case OE_RRR(SHRS, 0, X1):
    case OE_RRR(SHRS, 6, Y0):
    case OE_RRR(SHRS, 6, Y1):
        tcg_gen_andi_tl(tdest, tsrcb, 63);
        tcg_gen_sar_tl(tdest, tsrca, tdest);
        mnemonic = "shrs";
        break;
    case OE_RRR(SHRUX, 0, X0):
    case OE_RRR(SHRUX, 0, X1):
        t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, tsrcb, 31);
        tcg_gen_ext32u_tl(tdest, tsrca);
        tcg_gen_shr_tl(tdest, tdest, t0);
        tcg_gen_ext32s_tl(tdest, tdest);
        tcg_temp_free(t0);
        mnemonic = "shrux";
        break;
    case OE_RRR(SHRU, 0, X0):
    case OE_RRR(SHRU, 0, X1):
    case OE_RRR(SHRU, 6, Y0):
    case OE_RRR(SHRU, 6, Y1):
        tcg_gen_andi_tl(tdest, tsrcb, 63);
        tcg_gen_shr_tl(tdest, tsrca, tdest);
        mnemonic = "shru";
        break;
    case OE_RRR(SHUFFLEBYTES, 0, X0):
        gen_helper_shufflebytes(tdest, load_gr(dc, dest), tsrca, tsrcb);
        mnemonic = "shufflebytes";
        break;
    case OE_RRR(SUBXSC, 0, X0):
    case OE_RRR(SUBXSC, 0, X1):
        gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_sub_tl);
        mnemonic = "subxsc";
        break;
    case OE_RRR(SUBX, 0, X0):
    case OE_RRR(SUBX, 0, X1):
    case OE_RRR(SUBX, 0, Y0):
    case OE_RRR(SUBX, 0, Y1):
        tcg_gen_sub_tl(tdest, tsrca, tsrcb);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "subx";
        break;
    case OE_RRR(SUB, 0, X0):
    case OE_RRR(SUB, 0, X1):
    case OE_RRR(SUB, 0, Y0):
    case OE_RRR(SUB, 0, Y1):
        tcg_gen_sub_tl(tdest, tsrca, tsrcb);
        mnemonic = "sub";
        break;
    case OE_RRR(V1ADDUC, 0, X0):
    case OE_RRR(V1ADDUC, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V1ADD, 0, X0):
    case OE_RRR(V1ADD, 0, X1):
        gen_v12add(tdest, tsrca, tsrcb, V1_IMM(0x80));
        mnemonic = "v1add";
        break;
    case OE_RRR(V1ADIFFU, 0, X0):
    case OE_RRR(V1AVGU, 0, X0):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V1CMPEQ, 0, X0):
    case OE_RRR(V1CMPEQ, 0, X1):
        tcg_gen_xor_tl(tdest, tsrca, tsrcb);
        gen_v1cmpeq0(tdest);
        mnemonic = "v1cmpeq";
        break;
    case OE_RRR(V1CMPLES, 0, X0):
    case OE_RRR(V1CMPLES, 0, X1):
    case OE_RRR(V1CMPLEU, 0, X0):
    case OE_RRR(V1CMPLEU, 0, X1):
    case OE_RRR(V1CMPLTS, 0, X0):
    case OE_RRR(V1CMPLTS, 0, X1):
    case OE_RRR(V1CMPLTU, 0, X0):
    case OE_RRR(V1CMPLTU, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V1CMPNE, 0, X0):
    case OE_RRR(V1CMPNE, 0, X1):
        tcg_gen_xor_tl(tdest, tsrca, tsrcb);
        gen_v1cmpne0(tdest);
        mnemonic = "v1cmpne";
        break;
    case OE_RRR(V1DDOTPUA, 0, X0):
    case OE_RRR(V1DDOTPUSA, 0, X0):
    case OE_RRR(V1DDOTPUS, 0, X0):
    case OE_RRR(V1DDOTPU, 0, X0):
    case OE_RRR(V1DOTPA, 0, X0):
    case OE_RRR(V1DOTPUA, 0, X0):
    case OE_RRR(V1DOTPUSA, 0, X0):
    case OE_RRR(V1DOTPUS, 0, X0):
    case OE_RRR(V1DOTPU, 0, X0):
    case OE_RRR(V1DOTP, 0, X0):
    case OE_RRR(V1INT_H, 0, X0):
    case OE_RRR(V1INT_H, 0, X1):
    case OE_RRR(V1INT_L, 0, X0):
    case OE_RRR(V1INT_L, 0, X1):
    case OE_RRR(V1MAXU, 0, X0):
    case OE_RRR(V1MAXU, 0, X1):
    case OE_RRR(V1MINU, 0, X0):
    case OE_RRR(V1MINU, 0, X1):
    case OE_RRR(V1MNZ, 0, X0):
    case OE_RRR(V1MNZ, 0, X1):
    case OE_RRR(V1MULTU, 0, X0):
    case OE_RRR(V1MULUS, 0, X0):
    case OE_RRR(V1MULU, 0, X0):
    case OE_RRR(V1MZ, 0, X0):
    case OE_RRR(V1MZ, 0, X1):
    case OE_RRR(V1SADAU, 0, X0):
    case OE_RRR(V1SADU, 0, X0):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V1SHL, 0, X0):
    case OE_RRR(V1SHL, 0, X1):
        gen_helper_v1shl(tdest, tsrca, tsrcb);
        mnemonic = "v1shl";
        break;
    case OE_RRR(V1SHRS, 0, X0):
    case OE_RRR(V1SHRS, 0, X1):
        gen_helper_v1shrs(tdest, tsrca, tsrcb);
        mnemonic = "v1shrs";
        break;
    case OE_RRR(V1SHRU, 0, X0):
    case OE_RRR(V1SHRU, 0, X1):
        gen_helper_v1shru(tdest, tsrca, tsrcb);
        mnemonic = "v1shru";
        break;
    case OE_RRR(V1SUBUC, 0, X0):
    case OE_RRR(V1SUBUC, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V1SUB, 0, X0):
    case OE_RRR(V1SUB, 0, X1):
        gen_v12sub(tdest, tsrca, tsrcb, V1_IMM(0x80));
        mnemonic = "v1sub";
        break;
    case OE_RRR(V2ADDSC, 0, X0):
    case OE_RRR(V2ADDSC, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V2ADD, 0, X0):
    case OE_RRR(V2ADD, 0, X1):
        gen_v12add(tdest, tsrca, tsrcb, V2_IMM(0x8000));
        mnemonic = "v2add";
        break;
    case OE_RRR(V2ADIFFS, 0, X0):
    case OE_RRR(V2AVGS, 0, X0):
    case OE_RRR(V2CMPEQ, 0, X0):
    case OE_RRR(V2CMPEQ, 0, X1):
    case OE_RRR(V2CMPLES, 0, X0):
    case OE_RRR(V2CMPLES, 0, X1):
    case OE_RRR(V2CMPLEU, 0, X0):
    case OE_RRR(V2CMPLEU, 0, X1):
    case OE_RRR(V2CMPLTS, 0, X0):
    case OE_RRR(V2CMPLTS, 0, X1):
    case OE_RRR(V2CMPLTU, 0, X0):
    case OE_RRR(V2CMPLTU, 0, X1):
    case OE_RRR(V2CMPNE, 0, X0):
    case OE_RRR(V2CMPNE, 0, X1):
    case OE_RRR(V2DOTPA, 0, X0):
    case OE_RRR(V2DOTP, 0, X0):
    case OE_RRR(V2INT_H, 0, X0):
    case OE_RRR(V2INT_H, 0, X1):
    case OE_RRR(V2INT_L, 0, X0):
    case OE_RRR(V2INT_L, 0, X1):
    case OE_RRR(V2MAXS, 0, X0):
    case OE_RRR(V2MAXS, 0, X1):
    case OE_RRR(V2MINS, 0, X0):
    case OE_RRR(V2MINS, 0, X1):
    case OE_RRR(V2MNZ, 0, X0):
    case OE_RRR(V2MNZ, 0, X1):
    case OE_RRR(V2MULFSC, 0, X0):
    case OE_RRR(V2MULS, 0, X0):
    case OE_RRR(V2MULTS, 0, X0):
    case OE_RRR(V2MZ, 0, X0):
    case OE_RRR(V2MZ, 0, X1):
    case OE_RRR(V2PACKH, 0, X0):
    case OE_RRR(V2PACKH, 0, X1):
    case OE_RRR(V2PACKL, 0, X0):
    case OE_RRR(V2PACKL, 0, X1):
    case OE_RRR(V2PACKUC, 0, X0):
    case OE_RRR(V2PACKUC, 0, X1):
    case OE_RRR(V2SADAS, 0, X0):
    case OE_RRR(V2SADAU, 0, X0):
    case OE_RRR(V2SADS, 0, X0):
    case OE_RRR(V2SADU, 0, X0):
    case OE_RRR(V2SHLSC, 0, X0):
    case OE_RRR(V2SHLSC, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V2SHL, 0, X0):
    case OE_RRR(V2SHL, 0, X1):
        gen_helper_v2shl(tdest, tsrca, tsrcb);
        mnemonic = "v2shl";
        break;
    case OE_RRR(V2SHRS, 0, X0):
    case OE_RRR(V2SHRS, 0, X1):
        gen_helper_v2shrs(tdest, tsrca, tsrcb);
        mnemonic = "v2shrs";
        break;
    case OE_RRR(V2SHRU, 0, X0):
    case OE_RRR(V2SHRU, 0, X1):
        gen_helper_v2shru(tdest, tsrca, tsrcb);
        mnemonic = "v2shru";
        break;
    case OE_RRR(V2SUBSC, 0, X0):
    case OE_RRR(V2SUBSC, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V2SUB, 0, X0):
    case OE_RRR(V2SUB, 0, X1):
        gen_v12sub(tdest, tsrca, tsrcb, V2_IMM(0x8000));
        mnemonic = "v2sub";
        break;
    case OE_RRR(V4ADDSC, 0, X0):
    case OE_RRR(V4ADDSC, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V4ADD, 0, X0):
    case OE_RRR(V4ADD, 0, X1):
        gen_v4op(tdest, tsrca, tsrcb, tcg_gen_add_i32);
        mnemonic = "v4add";
        break;
    case OE_RRR(V4INT_H, 0, X0):
    case OE_RRR(V4INT_H, 0, X1):
        tcg_gen_shri_tl(tdest, tsrcb, 32);
        tcg_gen_deposit_tl(tdest, tsrca, tdest, 0, 32);
        mnemonic = "v4int_h";
        break;
    case OE_RRR(V4INT_L, 0, X0):
    case OE_RRR(V4INT_L, 0, X1):
        tcg_gen_deposit_tl(tdest, tsrcb, tsrca, 32, 32);
        mnemonic = "v4int_l";
        break;
    case OE_RRR(V4PACKSC, 0, X0):
    case OE_RRR(V4PACKSC, 0, X1):
    case OE_RRR(V4SHLSC, 0, X0):
    case OE_RRR(V4SHLSC, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V4SHL, 0, X0):
    case OE_RRR(V4SHL, 0, X1):
        gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_shl_i32);
        mnemonic = "v4shl";
        break;
    case OE_RRR(V4SHRS, 0, X0):
    case OE_RRR(V4SHRS, 0, X1):
        gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_sar_i32);
        mnemonic = "v4shrs";
        break;
    case OE_RRR(V4SHRU, 0, X0):
    case OE_RRR(V4SHRU, 0, X1):
        gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_shr_i32);
        mnemonic = "v4shru";
        break;
    case OE_RRR(V4SUBSC, 0, X0):
    case OE_RRR(V4SUBSC, 0, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RRR(V4SUB, 0, X0):
    case OE_RRR(V4SUB, 0, X1):
        gen_v4op(tdest, tsrca, tsrcb, tcg_gen_sub_i32);
        mnemonic = "v4sub";
        break;
    case OE_RRR(XOR, 0, X0):
    case OE_RRR(XOR, 0, X1):
    case OE_RRR(XOR, 5, Y0):
    case OE_RRR(XOR, 5, Y1):
        tcg_gen_xor_tl(tdest, tsrca, tsrcb);
        mnemonic = "xor";
        break;
    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %s", mnemonic,
                  reg_names[dest], reg_names[srca], reg_names[srcb]);
    return TILEGX_EXCP_NONE;
}

static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext,
                               unsigned dest, unsigned srca, int imm)
{
    TCGv tdest = dest_gr(dc, dest);
    TCGv tsrca = load_gr(dc, srca);
    const char *mnemonic;
    TCGMemOp memop;
    TCGv t0;
    unsigned i2, i3;

    switch (opext) {
    case OE(ADDI_OPCODE_Y0, 0, Y0):
    case OE(ADDI_OPCODE_Y1, 0, Y1):
    case OE_IM(ADDI, X0):
    case OE_IM(ADDI, X1):
        tcg_gen_addi_tl(tdest, tsrca, imm);
        mnemonic = "addi";
        break;
    case OE(ADDXI_OPCODE_Y0, 0, Y0):
    case OE(ADDXI_OPCODE_Y1, 0, Y1):
    case OE_IM(ADDXI, X0):
    case OE_IM(ADDXI, X1):
        tcg_gen_addi_tl(tdest, tsrca, imm);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "addxi";
        break;
    case OE(ANDI_OPCODE_Y0, 0, Y0):
    case OE(ANDI_OPCODE_Y1, 0, Y1):
    case OE_IM(ANDI, X0):
    case OE_IM(ANDI, X1):
        tcg_gen_andi_tl(tdest, tsrca, imm);
        mnemonic = "andi";
        break;
    case OE(CMPEQI_OPCODE_Y0, 0, Y0):
    case OE(CMPEQI_OPCODE_Y1, 0, Y1):
    case OE_IM(CMPEQI, X0):
    case OE_IM(CMPEQI, X1):
        tcg_gen_setcondi_tl(TCG_COND_EQ, tdest, tsrca, imm);
        mnemonic = "cmpeqi";
        break;
    case OE(CMPLTSI_OPCODE_Y0, 0, Y0):
    case OE(CMPLTSI_OPCODE_Y1, 0, Y1):
    case OE_IM(CMPLTSI, X0):
    case OE_IM(CMPLTSI, X1):
        tcg_gen_setcondi_tl(TCG_COND_LT, tdest, tsrca, imm);
        mnemonic = "cmpltsi";
        break;
    case OE_IM(CMPLTUI, X0):
    case OE_IM(CMPLTUI, X1):
        tcg_gen_setcondi_tl(TCG_COND_LTU, tdest, tsrca, imm);
        mnemonic = "cmpltui";
        break;
    case OE_IM(LD1S_ADD, X1):
        memop = MO_SB;
        mnemonic = "ld1s_add";
        goto do_load_add;
    case OE_IM(LD1U_ADD, X1):
        memop = MO_UB;
        mnemonic = "ld1u_add";
        goto do_load_add;
    case OE_IM(LD2S_ADD, X1):
        memop = MO_TESW;
        mnemonic = "ld2s_add";
        goto do_load_add;
    case OE_IM(LD2U_ADD, X1):
        memop = MO_TEUW;
        mnemonic = "ld2u_add";
        goto do_load_add;
    case OE_IM(LD4S_ADD, X1):
        memop = MO_TESL;
        mnemonic = "ld4s_add";
        goto do_load_add;
    case OE_IM(LD4U_ADD, X1):
        memop = MO_TEUL;
        mnemonic = "ld4u_add";
        goto do_load_add;
    case OE_IM(LDNT1S_ADD, X1):
        memop = MO_SB;
        mnemonic = "ldnt1s_add";
        goto do_load_add;
    case OE_IM(LDNT1U_ADD, X1):
        memop = MO_UB;
        mnemonic = "ldnt1u_add";
        goto do_load_add;
    case OE_IM(LDNT2S_ADD, X1):
        memop = MO_TESW;
        mnemonic = "ldnt2s_add";
        goto do_load_add;
    case OE_IM(LDNT2U_ADD, X1):
        memop = MO_TEUW;
        mnemonic = "ldnt2u_add";
        goto do_load_add;
    case OE_IM(LDNT4S_ADD, X1):
        memop = MO_TESL;
        mnemonic = "ldnt4s_add";
        goto do_load_add;
    case OE_IM(LDNT4U_ADD, X1):
        memop = MO_TEUL;
        mnemonic = "ldnt4u_add";
        goto do_load_add;
    case OE_IM(LDNT_ADD, X1):
        memop = MO_TEQ;
        mnemonic = "ldnt_add";
        goto do_load_add;
    case OE_IM(LD_ADD, X1):
        memop = MO_TEQ;
        mnemonic = "ld_add";
    do_load_add:
        tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
        tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
        break;
    case OE_IM(LDNA_ADD, X1):
        tcg_gen_andi_tl(tdest, tsrca, ~7);
        tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
        tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
        mnemonic = "ldna_add";
        break;
    case OE_IM(ORI, X0):
    case OE_IM(ORI, X1):
        tcg_gen_ori_tl(tdest, tsrca, imm);
        mnemonic = "ori";
        break;
    case OE_IM(V1ADDI, X0):
    case OE_IM(V1ADDI, X1):
        t0 = tcg_const_tl(V1_IMM(imm));
        gen_v12add(tdest, tsrca, t0, V1_IMM(0x80));
        tcg_temp_free(t0);
        mnemonic = "v1addi";
        break;
    case OE_IM(V1CMPEQI, X0):
    case OE_IM(V1CMPEQI, X1):
        tcg_gen_xori_tl(tdest, tsrca, V1_IMM(imm));
        gen_v1cmpeq0(tdest);
        mnemonic = "v1cmpeqi";
        break;
    case OE_IM(V1CMPLTSI, X0):
    case OE_IM(V1CMPLTSI, X1):
    case OE_IM(V1CMPLTUI, X0):
    case OE_IM(V1CMPLTUI, X1):
    case OE_IM(V1MAXUI, X0):
    case OE_IM(V1MAXUI, X1):
    case OE_IM(V1MINUI, X0):
    case OE_IM(V1MINUI, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_IM(V2ADDI, X0):
    case OE_IM(V2ADDI, X1):
        t0 = tcg_const_tl(V2_IMM(imm));
        gen_v12add(tdest, tsrca, t0, V2_IMM(0x8000));
        tcg_temp_free(t0);
        mnemonic = "v2addi";
        break;
    case OE_IM(V2CMPEQI, X0):
    case OE_IM(V2CMPEQI, X1):
    case OE_IM(V2CMPLTSI, X0):
    case OE_IM(V2CMPLTSI, X1):
    case OE_IM(V2CMPLTUI, X0):
    case OE_IM(V2CMPLTUI, X1):
    case OE_IM(V2MAXSI, X0):
    case OE_IM(V2MAXSI, X1):
    case OE_IM(V2MINSI, X0):
    case OE_IM(V2MINSI, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_IM(XORI, X0):
    case OE_IM(XORI, X1):
        tcg_gen_xori_tl(tdest, tsrca, imm);
        mnemonic = "xori";
        break;

    case OE_SH(ROTLI, X0):
    case OE_SH(ROTLI, X1):
    case OE_SH(ROTLI, Y0):
    case OE_SH(ROTLI, Y1):
        tcg_gen_rotli_tl(tdest, tsrca, imm);
        mnemonic = "rotli";
        break;
    case OE_SH(SHLI, X0):
    case OE_SH(SHLI, X1):
    case OE_SH(SHLI, Y0):
    case OE_SH(SHLI, Y1):
        tcg_gen_shli_tl(tdest, tsrca, imm);
        mnemonic = "shli";
        break;
    case OE_SH(SHLXI, X0):
    case OE_SH(SHLXI, X1):
        tcg_gen_shli_tl(tdest, tsrca, imm & 31);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "shlxi";
        break;
    case OE_SH(SHRSI, X0):
    case OE_SH(SHRSI, X1):
    case OE_SH(SHRSI, Y0):
    case OE_SH(SHRSI, Y1):
        tcg_gen_sari_tl(tdest, tsrca, imm);
        mnemonic = "shrsi";
        break;
    case OE_SH(SHRUI, X0):
    case OE_SH(SHRUI, X1):
    case OE_SH(SHRUI, Y0):
    case OE_SH(SHRUI, Y1):
        tcg_gen_shri_tl(tdest, tsrca, imm);
        mnemonic = "shrui";
        break;
    case OE_SH(SHRUXI, X0):
    case OE_SH(SHRUXI, X1):
        if ((imm & 31) == 0) {
            tcg_gen_ext32s_tl(tdest, tsrca);
        } else {
            tcg_gen_ext32u_tl(tdest, tsrca);
            tcg_gen_shri_tl(tdest, tdest, imm & 31);
        }
        mnemonic = "shruxi";
        break;
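
    /*
     * The V1 immediate shifts below are implemented with ordinary
     * 64-bit shifts by first masking out the bits that would cross a
     * byte-lane boundary.  Illustrative example for a right shift by
     * i2 = 3: i3 = (0xff << 3) & 0xff = 0xf8, so each byte keeps only
     * its top five bits before the whole word is shifted right by 3.
     */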
    case OE_SH(V1SHLI, X0):
    case OE_SH(V1SHLI, X1):
        i2 = imm & 7;
        i3 = 0xff >> i2;
        tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
        tcg_gen_shli_tl(tdest, tdest, i2);
        mnemonic = "v1shli";
        break;
    case OE_SH(V1SHRSI, X0):
    case OE_SH(V1SHRSI, X1):
        t0 = tcg_const_tl(imm & 7);
        gen_helper_v1shrs(tdest, tsrca, t0);
        tcg_temp_free(t0);
        mnemonic = "v1shrsi";
        break;
    case OE_SH(V1SHRUI, X0):
    case OE_SH(V1SHRUI, X1):
        i2 = imm & 7;
        i3 = (0xff << i2) & 0xff;
        tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
        tcg_gen_shri_tl(tdest, tdest, i2);
        mnemonic = "v1shrui";
        break;
    case OE_SH(V2SHLI, X0):
    case OE_SH(V2SHLI, X1):
    case OE_SH(V2SHRSI, X0):
    case OE_SH(V2SHRSI, X1):
    case OE_SH(V2SHRUI, X0):
    case OE_SH(V2SHRUI, X1):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;

    case OE(ADDLI_OPCODE_X0, 0, X0):
    case OE(ADDLI_OPCODE_X1, 0, X1):
        tcg_gen_addi_tl(tdest, tsrca, imm);
        mnemonic = "addli";
        break;
    case OE(ADDXLI_OPCODE_X0, 0, X0):
    case OE(ADDXLI_OPCODE_X1, 0, X1):
        tcg_gen_addi_tl(tdest, tsrca, imm);
        tcg_gen_ext32s_tl(tdest, tdest);
        mnemonic = "addxli";
        break;
    case OE(SHL16INSLI_OPCODE_X0, 0, X0):
    case OE(SHL16INSLI_OPCODE_X1, 0, X1):
        tcg_gen_shli_tl(tdest, tsrca, 16);
        tcg_gen_ori_tl(tdest, tdest, imm & 0xffff);
        mnemonic = "shl16insli";
        break;

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", mnemonic,
                  reg_names[dest], reg_names[srca], imm);
    return TILEGX_EXCP_NONE;
}

static TileExcp gen_bf_opcode_x0(DisasContext *dc, unsigned ext,
                                 unsigned dest, unsigned srca,
                                 unsigned bfs, unsigned bfe)
{
    TCGv tdest = dest_gr(dc, dest);
    TCGv tsrca = load_gr(dc, srca);
    TCGv tsrcd;
    int len;
    const char *mnemonic;

    /* The bitfield is either between E and S inclusive,
       or up from S and down from E inclusive.  */
    if (bfs <= bfe) {
        len = bfe - bfs + 1;
    } else {
        len = (64 - bfs) + (bfe + 1);
    }
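
    /*
     * Illustrative example of the wraparound case: bfs = 60, bfe = 3
     * selects the eight bits 63..60 and 3..0, giving
     * len = (64 - 60) + (3 + 1) = 8.
     */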

    switch (ext) {
    case BFEXTU_BF_OPCODE_X0:
        if (bfs == 0 && bfe == 7) {
            tcg_gen_ext8u_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 15) {
            tcg_gen_ext16u_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 31) {
            tcg_gen_ext32u_tl(tdest, tsrca);
        } else {
            int rol = 63 - bfe;
            if (bfs <= bfe) {
                tcg_gen_shli_tl(tdest, tsrca, rol);
            } else {
                tcg_gen_rotli_tl(tdest, tsrca, rol);
            }
            tcg_gen_shri_tl(tdest, tdest, (bfs + rol) & 63);
        }
        mnemonic = "bfextu";
        break;

    case BFEXTS_BF_OPCODE_X0:
        if (bfs == 0 && bfe == 7) {
            tcg_gen_ext8s_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 15) {
            tcg_gen_ext16s_tl(tdest, tsrca);
        } else if (bfs == 0 && bfe == 31) {
            tcg_gen_ext32s_tl(tdest, tsrca);
        } else {
            int rol = 63 - bfe;
            if (bfs <= bfe) {
                tcg_gen_shli_tl(tdest, tsrca, rol);
            } else {
                tcg_gen_rotli_tl(tdest, tsrca, rol);
            }
            tcg_gen_sari_tl(tdest, tdest, (bfs + rol) & 63);
        }
        mnemonic = "bfexts";
        break;

    case BFINS_BF_OPCODE_X0:
        tsrcd = load_gr(dc, dest);
        if (bfs <= bfe) {
            tcg_gen_deposit_tl(tdest, tsrcd, tsrca, bfs, len);
        } else {
            tcg_gen_rotri_tl(tdest, tsrcd, bfs);
            tcg_gen_deposit_tl(tdest, tdest, tsrca, 0, len);
            tcg_gen_rotli_tl(tdest, tdest, bfs);
        }
        mnemonic = "bfins";
        break;

    case MM_BF_OPCODE_X0:
        tsrcd = load_gr(dc, dest);
        if (bfs == 0) {
            tcg_gen_deposit_tl(tdest, tsrca, tsrcd, 0, len);
        } else {
            uint64_t mask = len == 64 ? -1 : rol64((1ULL << len) - 1, bfs);
            TCGv tmp = tcg_const_tl(mask);

            tcg_gen_and_tl(tdest, tsrcd, tmp);
            tcg_gen_andc_tl(tmp, tsrca, tmp);
            tcg_gen_or_tl(tdest, tdest, tmp);
            tcg_temp_free(tmp);
        }
        mnemonic = "mm";
        break;

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %u, %u", mnemonic,
                  reg_names[dest], reg_names[srca], bfs, bfe);
    return TILEGX_EXCP_NONE;
}

static TileExcp gen_branch_opcode_x1(DisasContext *dc, unsigned ext,
                                     unsigned srca, int off)
{
    target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
    const char *mnemonic;

    dc->jmp.dest = tcg_const_tl(tgt);
    dc->jmp.val1 = tcg_temp_new();
    tcg_gen_mov_tl(dc->jmp.val1, load_gr(dc, srca));

    /* Note that the "predict taken" opcodes have bit 0 clear.
       Therefore, fold the two cases together by setting bit 0.  */
    switch (ext | 1) {
    case BEQZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_EQ;
        mnemonic = "beqz";
        break;
    case BNEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_NE;
        mnemonic = "bnez";
        break;
    case BGEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_GE;
        mnemonic = "bgez";
        break;
    case BGTZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_GT;
        mnemonic = "bgtz";
        break;
    case BLEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_LE;
        mnemonic = "blez";
        break;
    case BLTZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_LT;
        mnemonic = "bltz";
        break;
    case BLBC_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_EQ;
        tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
        mnemonic = "blbc";
        break;
    case BLBS_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_NE;
        tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
        mnemonic = "blbs";
        break;
    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("%s%s %s, " TARGET_FMT_lx " <%s>",
                 mnemonic, ext & 1 ? "" : "t",
                 reg_names[srca], tgt, lookup_symbol(tgt));
    }
    return TILEGX_EXCP_NONE;
}

static TileExcp gen_jump_opcode_x1(DisasContext *dc, unsigned ext, int off)
{
    target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
    const char *mnemonic = "j";

    /* The extension field is 1 bit, therefore we only have JAL and J.  */
    if (ext == JAL_JUMP_OPCODE_X1) {
        tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
                        dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
        mnemonic = "jal";
    }
    dc->jmp.cond = TCG_COND_ALWAYS;
    dc->jmp.dest = tcg_const_tl(tgt);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("%s " TARGET_FMT_lx " <%s>",
                 mnemonic, tgt, lookup_symbol(tgt));
    }
    return TILEGX_EXCP_NONE;
}

typedef struct {
    const char *name;
    int offset;
    void (*get)(TCGv, TCGv_ptr);
    void (*put)(TCGv_ptr, TCGv);
} TileSPR;

static const TileSPR *find_spr(unsigned spr)
{
    switch (spr) {
    /* Allow the compiler to construct the binary search tree.  */
#define D(N, O, G, P) \
    case SPR_##N: { static const TileSPR x = { #N, O, G, P }; return &x; }

    D(CMPEXCH_VALUE,
      offsetof(CPUTLGState, spregs[TILEGX_SPR_CMPEXCH]), 0, 0)
    D(INTERRUPT_CRITICAL_SECTION,
      offsetof(CPUTLGState, spregs[TILEGX_SPR_CRITICAL_SEC]), 0, 0)
    D(SIM_CONTROL,
      offsetof(CPUTLGState, spregs[TILEGX_SPR_SIM_CONTROL]), 0, 0)

#undef D

    default:
        qemu_log_mask(LOG_UNIMP, "UNIMP SPR %u\n", spr);
        return NULL;
    }
}

static TileExcp gen_mtspr_x1(DisasContext *dc, unsigned spr, unsigned srca)
{
    const TileSPR *def = find_spr(spr);
    TCGv tsrca;

    if (def == NULL) {
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr spr[%u], %s", spr, reg_names[srca]);
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }

    tsrca = load_gr(dc, srca);
    if (def->put) {
        def->put(cpu_env, tsrca);
    } else {
        tcg_gen_st_tl(tsrca, cpu_env, def->offset);
    }
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr %s, %s", def->name, reg_names[srca]);
    return TILEGX_EXCP_NONE;
}

static TileExcp gen_mfspr_x1(DisasContext *dc, unsigned dest, unsigned spr)
{
    const TileSPR *def = find_spr(spr);
    TCGv tdest;

    if (def == NULL) {
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "mfspr %s, spr[%u]", reg_names[dest], spr);
        return TILEGX_EXCP_OPCODE_UNKNOWN;
    }

    tdest = dest_gr(dc, dest);
    if (def->get) {
        def->get(tdest, cpu_env);
    } else {
        tcg_gen_ld_tl(tdest, cpu_env, def->offset);
    }
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "mfspr %s, %s", reg_names[dest], def->name);
    return TILEGX_EXCP_NONE;
}

static TileExcp decode_y0(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_Y0(bundle);
    unsigned ext = get_RRROpcodeExtension_Y0(bundle);
    unsigned dest = get_Dest_Y0(bundle);
    unsigned srca = get_SrcA_Y0(bundle);
    unsigned srcb;
    int imm;

    switch (opc) {
    case RRR_1_OPCODE_Y0:
        if (ext == UNARY_RRR_1_OPCODE_Y0) {
            ext = get_UnaryOpcodeExtension_Y0(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, Y0), dest, srca);
        }
        /* fall through */
    case RRR_0_OPCODE_Y0:
    case RRR_2_OPCODE_Y0:
    case RRR_3_OPCODE_Y0:
    case RRR_4_OPCODE_Y0:
    case RRR_5_OPCODE_Y0:
    case RRR_6_OPCODE_Y0:
    case RRR_7_OPCODE_Y0:
    case RRR_8_OPCODE_Y0:
    case RRR_9_OPCODE_Y0:
        srcb = get_SrcB_Y0(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, Y0), dest, srca, srcb);

    case SHIFT_OPCODE_Y0:
        ext = get_ShiftOpcodeExtension_Y0(bundle);
        imm = get_ShAmt_Y0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, Y0), dest, srca, imm);

    case ADDI_OPCODE_Y0:
    case ADDXI_OPCODE_Y0:
    case ANDI_OPCODE_Y0:
    case CMPEQI_OPCODE_Y0:
    case CMPLTSI_OPCODE_Y0:
        imm = (int8_t)get_Imm8_Y0(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, Y0), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}

static TileExcp decode_y1(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_Y1(bundle);
    unsigned ext = get_RRROpcodeExtension_Y1(bundle);
    unsigned dest = get_Dest_Y1(bundle);
    unsigned srca = get_SrcA_Y1(bundle);
    unsigned srcb;
    int imm;

    switch (get_Opcode_Y1(bundle)) {
    case RRR_1_OPCODE_Y1:
        if (ext == UNARY_RRR_1_OPCODE_Y1) {
            ext = get_UnaryOpcodeExtension_Y1(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, Y1), dest, srca);
        }
        /* fall through */
    case RRR_0_OPCODE_Y1:
    case RRR_2_OPCODE_Y1:
    case RRR_3_OPCODE_Y1:
    case RRR_4_OPCODE_Y1:
    case RRR_5_OPCODE_Y1:
    case RRR_6_OPCODE_Y1:
    case RRR_7_OPCODE_Y1:
        srcb = get_SrcB_Y1(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, Y1), dest, srca, srcb);

    case SHIFT_OPCODE_Y1:
        ext = get_ShiftOpcodeExtension_Y1(bundle);
        imm = get_ShAmt_Y1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, Y1), dest, srca, imm);

    case ADDI_OPCODE_Y1:
    case ADDXI_OPCODE_Y1:
    case ANDI_OPCODE_Y1:
    case CMPEQI_OPCODE_Y1:
    case CMPLTSI_OPCODE_Y1:
        imm = (int8_t)get_Imm8_Y1(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, Y1), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}

static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned mode = get_Mode(bundle);
    unsigned opc = get_Opcode_Y2(bundle);
    unsigned srca = get_SrcA_Y2(bundle);
    unsigned srcbdest = get_SrcBDest_Y2(bundle);
    const char *mnemonic;
    TCGMemOp memop;

    switch (OEY2(opc, mode)) {
    case OEY2(LD1S_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_SB;
        mnemonic = "ld1s";
        goto do_load;
    case OEY2(LD1U_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_UB;
        mnemonic = "ld1u";
        goto do_load;
    case OEY2(LD2S_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_TESW;
        mnemonic = "ld2s";
        goto do_load;
    case OEY2(LD2U_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_TEUW;
        mnemonic = "ld2u";
        goto do_load;
    case OEY2(LD4S_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TESL;
        mnemonic = "ld4s";
        goto do_load;
    case OEY2(LD4U_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TEUL;
        mnemonic = "ld4u";
        goto do_load;
    case OEY2(LD_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TEQ;
        mnemonic = "ld";
    do_load:
        tcg_gen_qemu_ld_tl(dest_gr(dc, srcbdest), load_gr(dc, srca),
                           dc->mmuidx, memop);
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
                      reg_names[srcbdest], reg_names[srca]);
        return TILEGX_EXCP_NONE;

    case OEY2(ST1_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_UB, "st1");
    case OEY2(ST2_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUW, "st2");
    case OEY2(ST4_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUL, "st4");
    case OEY2(ST_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEQ, "st");

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}

static TileExcp decode_x0(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_X0(bundle);
    unsigned dest = get_Dest_X0(bundle);
    unsigned srca = get_SrcA_X0(bundle);
    unsigned ext, srcb, bfs, bfe;
    int imm;

    switch (opc) {
    case RRR_0_OPCODE_X0:
        ext = get_RRROpcodeExtension_X0(bundle);
        if (ext == UNARY_RRR_0_OPCODE_X0) {
            ext = get_UnaryOpcodeExtension_X0(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, X0), dest, srca);
        }
        srcb = get_SrcB_X0(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, X0), dest, srca, srcb);

    case SHIFT_OPCODE_X0:
        ext = get_ShiftOpcodeExtension_X0(bundle);
        imm = get_ShAmt_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);

    case IMM8_OPCODE_X0:
        ext = get_Imm8OpcodeExtension_X0(bundle);
        imm = (int8_t)get_Imm8_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);

    case BF_OPCODE_X0:
        ext = get_BFOpcodeExtension_X0(bundle);
        bfs = get_BFStart_X0(bundle);
        bfe = get_BFEnd_X0(bundle);
        return gen_bf_opcode_x0(dc, ext, dest, srca, bfs, bfe);

    case ADDLI_OPCODE_X0:
    case SHL16INSLI_OPCODE_X0:
    case ADDXLI_OPCODE_X0:
        imm = (int16_t)get_Imm16_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, X0), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}

static TileExcp decode_x1(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_X1(bundle);
    unsigned dest = get_Dest_X1(bundle);
    unsigned srca = get_SrcA_X1(bundle);
    unsigned ext, srcb;
    int imm;

    switch (opc) {
    case RRR_0_OPCODE_X1:
        ext = get_RRROpcodeExtension_X1(bundle);
        srcb = get_SrcB_X1(bundle);
        switch (ext) {
        case UNARY_RRR_0_OPCODE_X1:
            ext = get_UnaryOpcodeExtension_X1(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, X1), dest, srca);
        case ST1_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "st1");
        case ST2_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "st2");
        case ST4_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "st4");
        case STNT1_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "stnt1");
        case STNT2_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "stnt2");
        case STNT4_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "stnt4");
        case STNT_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "stnt");
        case ST_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "st");
        }
        return gen_rrr_opcode(dc, OE(opc, ext, X1), dest, srca, srcb);

    case SHIFT_OPCODE_X1:
        ext = get_ShiftOpcodeExtension_X1(bundle);
        imm = get_ShAmt_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);

    case IMM8_OPCODE_X1:
        ext = get_Imm8OpcodeExtension_X1(bundle);
        imm = (int8_t)get_Dest_Imm8_X1(bundle);
        srcb = get_SrcB_X1(bundle);
        switch (ext) {
        case ST1_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "st1_add");
        case ST2_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "st2_add");
        case ST4_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "st4_add");
        case STNT1_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "stnt1_add");
        case STNT2_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "stnt2_add");
        case STNT4_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "stnt4_add");
        case STNT_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "stnt_add");
        case ST_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "st_add");
        case MFSPR_IMM8_OPCODE_X1:
            return gen_mfspr_x1(dc, dest, get_MF_Imm14_X1(bundle));
        case MTSPR_IMM8_OPCODE_X1:
            return gen_mtspr_x1(dc, get_MT_Imm14_X1(bundle), srca);
        }
        imm = (int8_t)get_Imm8_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);

    case BRANCH_OPCODE_X1:
        ext = get_BrType_X1(bundle);
        imm = sextract32(get_BrOff_X1(bundle), 0, 17);
        return gen_branch_opcode_x1(dc, ext, srca, imm);

    case JUMP_OPCODE_X1:
        ext = get_JumpOpcodeExtension_X1(bundle);
        imm = sextract32(get_JumpOff_X1(bundle), 0, 27);
        return gen_jump_opcode_x1(dc, ext, imm);

    case ADDLI_OPCODE_X1:
    case SHL16INSLI_OPCODE_X1:
    case ADDXLI_OPCODE_X1:
        imm = (int16_t)get_Imm16_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, X1), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}

static void notice_excp(DisasContext *dc, uint64_t bundle,
                        const char *type, TileExcp excp)
{
    if (likely(excp == TILEGX_EXCP_NONE)) {
        return;
    }
    gen_exception(dc, excp);
    if (excp == TILEGX_EXCP_OPCODE_UNIMPLEMENTED) {
        qemu_log_mask(LOG_UNIMP, "UNIMP %s, [" FMT64X "]\n", type, bundle);
    }
}

static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(dc->wb); i++) {
        DisasContextTemp *wb = &dc->wb[i];
        wb->reg = TILEGX_R_NOREG;
        TCGV_UNUSED_I64(wb->val);
    }
    dc->num_wb = 0;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "  %" PRIx64 ":  { ", dc->pc);
    if (get_Mode(bundle)) {
        notice_excp(dc, bundle, "y0", decode_y0(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "y1", decode_y1(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "y2", decode_y2(dc, bundle));
    } else {
        notice_excp(dc, bundle, "x0", decode_x0(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "x1", decode_x1(dc, bundle));
    }
    qemu_log_mask(CPU_LOG_TB_IN_ASM, " }\n");

    for (i = dc->num_wb - 1; i >= 0; --i) {
        DisasContextTemp *wb = &dc->wb[i];
        if (wb->reg < TILEGX_R_COUNT) {
            tcg_gen_mov_i64(cpu_regs[wb->reg], wb->val);
        }
        tcg_temp_free_i64(wb->val);
    }

    if (dc->jmp.cond != TCG_COND_NEVER) {
        if (dc->jmp.cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(cpu_pc, dc->jmp.dest);
        } else {
            TCGv next = tcg_const_i64(dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
            tcg_gen_movcond_i64(dc->jmp.cond, cpu_pc,
                                dc->jmp.val1, load_zero(dc),
                                dc->jmp.dest, next);
            tcg_temp_free_i64(dc->jmp.val1);
            tcg_temp_free_i64(next);
        }
        tcg_temp_free_i64(dc->jmp.dest);
        tcg_gen_exit_tb(0);
        dc->exit_tb = true;
    } else if (dc->atomic_excp != TILEGX_EXCP_NONE) {
        gen_exception(dc, dc->atomic_excp);
    }
}

static inline void gen_intermediate_code_internal(TileGXCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    DisasContext ctx;
    DisasContext *dc = &ctx;
    CPUState *cs = CPU(cpu);
    CPUTLGState *env = &cpu->env;
    uint64_t pc_start = tb->pc;
    uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    int j, lj = -1;
    int num_insns = 0;
    int max_insns = tb->cflags & CF_COUNT_MASK;

    dc->pc = pc_start;
    dc->mmuidx = 0;
    dc->exit_tb = false;
    dc->atomic_excp = TILEGX_EXCP_NONE;
    dc->jmp.cond = TCG_COND_NEVER;
    TCGV_UNUSED_I64(dc->jmp.dest);
    TCGV_UNUSED_I64(dc->jmp.val1);
    TCGV_UNUSED_I64(dc->zero);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
    }
    if (!max_insns) {
        max_insns = CF_COUNT_MASK;
    }
    if (cs->singlestep_enabled || singlestep) {
        max_insns = 1;
    }
    gen_tb_start(tb);

    while (1) {
        if (search_pc) {
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        translate_one_bundle(dc, cpu_ldq_data(env, dc->pc));

        if (dc->exit_tb) {
            /* PC updated and EXIT_TB/GOTO_TB/exception emitted.  */
            break;
        }
        dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES;
        if (++num_insns >= max_insns
            || dc->pc >= next_page_start
            || tcg_op_buf_full()) {
            /* Ending the TB due to TB size or page boundary.  Set PC.  */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            tcg_gen_exit_tb(0);
            break;
        }
    }

    gen_tb_end(tb, num_insns);
    if (search_pc) {
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
}

void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUTLGState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}

void tilegx_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUTLGState, pc), "pc");
    for (i = 0; i < TILEGX_R_COUNT; i++) {
        cpu_regs[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUTLGState, regs[i]),
                                             reg_names[i]);
    }
}