target-tilegx: Handle conditional branch instructions
[qemu/ar7.git] / target-tilegx / translate.c
blob9c95633fa069d852b64300a81cfc5f647a5fbdbd
1 /*
2 * QEMU TILE-Gx CPU
4 * Copyright (c) 2015 Chen Gang
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see
18 * <http://www.gnu.org/licenses/lgpl-2.1.html>
21 #include "cpu.h"
22 #include "qemu/log.h"
23 #include "disas/disas.h"
24 #include "tcg-op.h"
25 #include "exec/cpu_ldst.h"
26 #include "opcode_tilegx.h"
28 #define FMT64X "%016" PRIx64
/* Translation-time handles onto the CPU state, initialized once at
   translator setup and shared by all generated code in this file. */
static TCGv_ptr cpu_env;                /* pointer to the CPU env structure */
static TCGv cpu_pc;                     /* program counter */
/* Backing TCG globals for the architectural general registers.  Register
   numbers >= TILEGX_R_COUNT (SN/IDN/UDN/zero) are not backed here; see
   check_gr() for how accesses to them are handled. */
static TCGv cpu_regs[TILEGX_R_COUNT];
/* Disassembly names for all 64 encodable register numbers.  Entries 52-63
   are the ABI/special names: bp, tp, sp, lr, then sn, idn0-1, udn0-3, zero.
   BUG FIX: the original table listed "udn2" twice; entry 62 is UDN3
   (matching TILEGX_R_UDN3 handled in check_gr), so it must be "udn3". */
static const char * const reg_names[64] = {
    "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
    "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
    "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
    "r48", "r49", "r50", "r51", "bp",  "tp",  "sp",  "lr",
    "sn",  "idn0", "idn1", "udn0", "udn1", "udn2", "udn3", "zero"
};
/* Modified registers are cached in temporaries until the end of the bundle. */
typedef struct {
    unsigned reg;       /* architectural register number to write back */
    TCGv val;           /* temporary holding the value to be written */
} DisasContextTemp;
/* Maximum register writebacks a single bundle can queue (see dest_gr). */
#define MAX_WRITEBACK 4

/* This is the state at translation time. */
typedef struct {
    uint64_t pc;                /* Current pc */

    TCGv zero;                  /* For zero register */

    DisasContextTemp wb[MAX_WRITEBACK];   /* pending register writebacks */
    int num_wb;                 /* number of wb[] entries in use */
    int mmuidx;                 /* MMU index for loads/stores in this TB */
    bool exit_tb;               /* true once an exception ends translation */

    struct {
        TCGCond cond;           /* branch condition */
        TCGv dest;              /* branch destination */
        TCGv val1;              /* value to be compared against zero, for cond */
    } jmp;                      /* Jump object, only once in each TB block */
} DisasContext;
71 #include "exec/gen-icount.h"
/* Differentiate the various pipe encodings. */
#define TY_X0 0
#define TY_X1 1
#define TY_Y0 2
#define TY_Y1 3

/* Remerge the base opcode and extension fields for switching.
   The X opcode fields are 3 bits; Y0/Y1 opcode fields are 4 bits;
   Y2 opcode field is 2 bits. */
#define OE(OP, EXT, XY) (TY_##XY + OP * 4 + EXT * 64)

/* Similar, but for Y2 only. */
#define OEY2(OP, MODE) (OP + MODE * 4)

/* Similar, but make sure opcode names match up.  Each wrapper pastes the
   instruction name E into the opcode/extension constant names so that a
   mismatched pair fails to compile rather than silently decoding wrong. */
#define OE_RR_X0(E)    OE(RRR_0_OPCODE_X0, E##_UNARY_OPCODE_X0, X0)
#define OE_RR_X1(E)    OE(RRR_0_OPCODE_X1, E##_UNARY_OPCODE_X1, X1)
#define OE_RR_Y0(E)    OE(RRR_1_OPCODE_Y0, E##_UNARY_OPCODE_Y0, Y0)
#define OE_RR_Y1(E)    OE(RRR_1_OPCODE_Y1, E##_UNARY_OPCODE_Y1, Y1)
#define OE_RRR(E,N,XY) OE(RRR_##N##_OPCODE_##XY, E##_RRR_##N##_OPCODE_##XY, XY)
#define OE_IM(E,XY)    OE(IMM8_OPCODE_##XY, E##_IMM8_OPCODE_##XY, XY)
#define OE_SH(E,XY)    OE(SHIFT_OPCODE_##XY, E##_SHIFT_OPCODE_##XY, XY)
97 static void gen_exception(DisasContext *dc, TileExcp num)
99 TCGv_i32 tmp;
101 tcg_gen_movi_tl(cpu_pc, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
103 tmp = tcg_const_i32(num);
104 gen_helper_exception(cpu_env, tmp);
105 tcg_temp_free_i32(tmp);
106 dc->exit_tb = true;
109 static bool check_gr(DisasContext *dc, uint8_t reg)
111 if (likely(reg < TILEGX_R_COUNT)) {
112 return true;
115 switch (reg) {
116 case TILEGX_R_SN:
117 case TILEGX_R_ZERO:
118 break;
119 case TILEGX_R_IDN0:
120 case TILEGX_R_IDN1:
121 gen_exception(dc, TILEGX_EXCP_REG_IDN_ACCESS);
122 break;
123 case TILEGX_R_UDN0:
124 case TILEGX_R_UDN1:
125 case TILEGX_R_UDN2:
126 case TILEGX_R_UDN3:
127 gen_exception(dc, TILEGX_EXCP_REG_UDN_ACCESS);
128 break;
129 default:
130 g_assert_not_reached();
132 return false;
135 static TCGv load_zero(DisasContext *dc)
137 if (TCGV_IS_UNUSED_I64(dc->zero)) {
138 dc->zero = tcg_const_i64(0);
140 return dc->zero;
143 static TCGv load_gr(DisasContext *dc, unsigned reg)
145 if (check_gr(dc, reg)) {
146 return cpu_regs[reg];
148 return load_zero(dc);
151 static TCGv dest_gr(DisasContext *dc, unsigned reg)
153 int n;
155 /* Skip the result, mark the exception if necessary, and continue */
156 check_gr(dc, reg);
158 n = dc->num_wb++;
159 dc->wb[n].reg = reg;
160 return dc->wb[n].val = tcg_temp_new_i64();
163 static void gen_saturate_op(TCGv tdest, TCGv tsrca, TCGv tsrcb,
164 void (*operate)(TCGv, TCGv, TCGv))
166 TCGv t0 = tcg_temp_new();
168 tcg_gen_ext32s_tl(tdest, tsrca);
169 tcg_gen_ext32s_tl(t0, tsrcb);
170 operate(tdest, tdest, t0);
172 tcg_gen_movi_tl(t0, 0x7fffffff);
173 tcg_gen_movcond_tl(TCG_COND_GT, tdest, tdest, t0, t0, tdest);
174 tcg_gen_movi_tl(t0, -0x80000000LL);
175 tcg_gen_movcond_tl(TCG_COND_LT, tdest, tdest, t0, t0, tdest);
177 tcg_temp_free(t0);
/* Shift the 128-bit value TSRCA:TSRCD right by the number of bytes
   specified by the bottom 3 bits of TSRCB, and set TDEST to the
   low 64 bits of the resulting value. */
static void gen_dblalign(TCGv tdest, TCGv tsrcd, TCGv tsrca, TCGv tsrcb)
{
    TCGv t0 = tcg_temp_new();

    /* Byte count -> bit count; shift the low half into place. */
    tcg_gen_andi_tl(t0, tsrcb, 7);
    tcg_gen_shli_tl(t0, t0, 3);
    tcg_gen_shr_tl(tdest, tsrcd, t0);

    /* We want to do "t0 = tsrca << (64 - t0)".  Two's complement
       arithmetic on a 6-bit field tells us that 64 - t0 is equal
       to (t0 ^ 63) + 1.  So we can do the shift in two parts,
       neither of which will be an invalid shift by 64.  */
    tcg_gen_xori_tl(t0, t0, 63);
    tcg_gen_shl_tl(t0, tsrca, t0);
    tcg_gen_shli_tl(t0, t0, 1);
    tcg_gen_or_tl(tdest, tdest, t0);

    tcg_temp_free(t0);
}
203 /* Similarly, except that the 128-bit value is TSRCA:TSRCB, and the
204 right shift is an immediate. */
205 static void gen_dblaligni(TCGv tdest, TCGv tsrca, TCGv tsrcb, int shr)
207 TCGv t0 = tcg_temp_new();
209 tcg_gen_shri_tl(t0, tsrcb, shr);
210 tcg_gen_shli_tl(tdest, tsrca, 64 - shr);
211 tcg_gen_or_tl(tdest, tdest, t0);
213 tcg_temp_free(t0);
216 static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca,
217 unsigned srcb, TCGMemOp memop, const char *name)
219 if (dest) {
220 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
223 tcg_gen_qemu_st_tl(load_gr(dc, srcb), load_gr(dc, srca),
224 dc->mmuidx, memop);
226 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", name,
227 reg_names[srca], reg_names[srcb]);
228 return TILEGX_EXCP_NONE;
231 static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb,
232 int imm, TCGMemOp memop, const char *name)
234 TCGv tsrca = load_gr(dc, srca);
235 TCGv tsrcb = load_gr(dc, srcb);
237 tcg_gen_qemu_st_tl(tsrcb, tsrca, dc->mmuidx, memop);
238 tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
240 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", name,
241 reg_names[srca], reg_names[srcb], imm);
242 return TILEGX_EXCP_NONE;
/* Translate a two-register-operand ("unary") instruction for any pipe.
   OPEXT is the remerged opcode+extension (see the OE_RR_* macros); DEST
   and SRCA are the encoded register numbers.  Returns TILEGX_EXCP_NONE
   on success or an exception code for unimplemented/invalid encodings. */
static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext,
                              unsigned dest, unsigned srca)
{
    TCGv tdest, tsrca;
    const char *mnemonic;
    TCGMemOp memop;

    /* Eliminate nops and jumps before doing anything else. */
    switch (opext) {
    case OE_RR_Y0(NOP):
    case OE_RR_Y1(NOP):
    case OE_RR_X0(NOP):
    case OE_RR_X1(NOP):
        mnemonic = "nop";
        goto do_nop;
    case OE_RR_Y0(FNOP):
    case OE_RR_Y1(FNOP):
    case OE_RR_X0(FNOP):
    case OE_RR_X1(FNOP):
        mnemonic = "fnop";
    do_nop:
        /* Both register fields must be zero in a valid nop encoding. */
        if (srca || dest) {
            return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
        return TILEGX_EXCP_NONE;

    case OE_RR_X1(JRP):
    case OE_RR_Y1(JRP):
        mnemonic = "jrp";
        goto do_jr;
    case OE_RR_X1(JR):
    case OE_RR_Y1(JR):
        mnemonic = "jr";
        goto do_jr;
    case OE_RR_X1(JALRP):
    case OE_RR_Y1(JALRP):
        mnemonic = "jalrp";
        goto do_jalr;
    case OE_RR_X1(JALR):
    case OE_RR_Y1(JALR):
        mnemonic = "jalr";
    do_jalr:
        /* Link: LR = address of the bundle after this one. */
        tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
                        dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
    do_jr:
        /* Indirect jump; target address is bundle-aligned (low 3 bits
           cleared). */
        dc->jmp.cond = TCG_COND_ALWAYS;
        dc->jmp.dest = tcg_temp_new();
        tcg_gen_andi_tl(dc->jmp.dest, load_gr(dc, srca), ~7);
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s", mnemonic, reg_names[srca]);
        return TILEGX_EXCP_NONE;
    }

    tdest = dest_gr(dc, dest);
    tsrca = load_gr(dc, srca);

    switch (opext) {
    case OE_RR_X0(CNTLZ):
    case OE_RR_Y0(CNTLZ):
        gen_helper_cntlz(tdest, tsrca);
        mnemonic = "cntlz";
        break;
    case OE_RR_X0(CNTTZ):
    case OE_RR_Y0(CNTTZ):
        gen_helper_cnttz(tdest, tsrca);
        mnemonic = "cnttz";
        break;
    case OE_RR_X1(DRAIN):
    case OE_RR_X1(DTLBPR):
    case OE_RR_X1(FINV):
    case OE_RR_X1(FLUSHWB):
    case OE_RR_X1(FLUSH):
    case OE_RR_X0(FSINGLE_PACK1):
    case OE_RR_Y0(FSINGLE_PACK1):
    case OE_RR_X1(ICOH):
    case OE_RR_X1(ILL):
    case OE_RR_Y1(ILL):
    case OE_RR_X1(INV):
    case OE_RR_X1(IRET):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RR_X1(LD1S):
        memop = MO_SB;
        mnemonic = "ld1s";
        goto do_load;
    case OE_RR_X1(LD1U):
        memop = MO_UB;
        mnemonic = "ld1u";
        goto do_load;
    case OE_RR_X1(LD2S):
        memop = MO_TESW;
        mnemonic = "ld2s";
        goto do_load;
    case OE_RR_X1(LD2U):
        memop = MO_TEUW;
        mnemonic = "ld2u";
        goto do_load;
    case OE_RR_X1(LD4S):
        memop = MO_TESL;
        mnemonic = "ld4s";
        goto do_load;
    case OE_RR_X1(LD4U):
        memop = MO_TEUL;
        mnemonic = "ld4u";
        goto do_load;
    /* The non-temporal loads are translated as ordinary loads; the
       cache hint has no software-visible effect under emulation. */
    case OE_RR_X1(LDNT1S):
        memop = MO_SB;
        mnemonic = "ldnt1s";
        goto do_load;
    case OE_RR_X1(LDNT1U):
        memop = MO_UB;
        mnemonic = "ldnt1u";
        goto do_load;
    case OE_RR_X1(LDNT2S):
        memop = MO_TESW;
        mnemonic = "ldnt2s";
        goto do_load;
    case OE_RR_X1(LDNT2U):
        memop = MO_TEUW;
        mnemonic = "ldnt2u";
        goto do_load;
    case OE_RR_X1(LDNT4S):
        memop = MO_TESL;
        mnemonic = "ldnt4s";
        goto do_load;
    case OE_RR_X1(LDNT4U):
        memop = MO_TEUL;
        mnemonic = "ldnt4u";
        goto do_load;
    case OE_RR_X1(LDNT):
        memop = MO_TEQ;
        mnemonic = "ldnt";
        goto do_load;
    case OE_RR_X1(LD):
        memop = MO_TEQ;
        mnemonic = "ld";
    do_load:
        tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
        break;
    case OE_RR_X1(LDNA):
        /* Load no-alignment-trap: force the address down to the
           containing aligned doubleword. */
        tcg_gen_andi_tl(tdest, tsrca, ~7);
        tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
        mnemonic = "ldna";
        break;
    case OE_RR_X1(LNK):
    case OE_RR_Y1(LNK):
        if (srca) {
            return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
        }
        tcg_gen_movi_tl(tdest, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
        mnemonic = "lnk";
        break;
    case OE_RR_X1(MF):
    case OE_RR_X1(NAP):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RR_X0(PCNT):
    case OE_RR_Y0(PCNT):
        gen_helper_pcnt(tdest, tsrca);
        mnemonic = "pcnt";
        break;
    case OE_RR_X0(REVBITS):
    case OE_RR_Y0(REVBITS):
        gen_helper_revbits(tdest, tsrca);
        mnemonic = "revbits";
        break;
    case OE_RR_X0(REVBYTES):
    case OE_RR_Y0(REVBYTES):
        tcg_gen_bswap64_tl(tdest, tsrca);
        mnemonic = "revbytes";
        break;
    case OE_RR_X1(SWINT0):
    case OE_RR_X1(SWINT1):
    case OE_RR_X1(SWINT2):
    case OE_RR_X1(SWINT3):
    case OE_RR_X0(TBLIDXB0):
    case OE_RR_Y0(TBLIDXB0):
    case OE_RR_X0(TBLIDXB1):
    case OE_RR_Y0(TBLIDXB1):
    case OE_RR_X0(TBLIDXB2):
    case OE_RR_Y0(TBLIDXB2):
    case OE_RR_X0(TBLIDXB3):
    case OE_RR_Y0(TBLIDXB3):
    case OE_RR_X1(WH64):
    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
                  reg_names[dest], reg_names[srca]);
    return TILEGX_EXCP_NONE;
}
436 static TileExcp gen_rrr_opcode(DisasContext *dc, unsigned opext,
437 unsigned dest, unsigned srca, unsigned srcb)
439 TCGv tdest = dest_gr(dc, dest);
440 TCGv tsrca = load_gr(dc, srca);
441 TCGv tsrcb = load_gr(dc, srcb);
442 const char *mnemonic;
444 switch (opext) {
445 case OE_RRR(ADDXSC, 0, X0):
446 case OE_RRR(ADDXSC, 0, X1):
447 gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_add_tl);
448 mnemonic = "addxsc";
449 break;
450 case OE_RRR(ADDX, 0, X0):
451 case OE_RRR(ADDX, 0, X1):
452 case OE_RRR(ADDX, 0, Y0):
453 case OE_RRR(ADDX, 0, Y1):
454 tcg_gen_add_tl(tdest, tsrca, tsrcb);
455 tcg_gen_ext32s_tl(tdest, tdest);
456 mnemonic = "addx";
457 break;
458 case OE_RRR(ADD, 0, X0):
459 case OE_RRR(ADD, 0, X1):
460 case OE_RRR(ADD, 0, Y0):
461 case OE_RRR(ADD, 0, Y1):
462 tcg_gen_add_tl(tdest, tsrca, tsrcb);
463 mnemonic = "add";
464 break;
465 case OE_RRR(AND, 0, X0):
466 case OE_RRR(AND, 0, X1):
467 case OE_RRR(AND, 5, Y0):
468 case OE_RRR(AND, 5, Y1):
469 tcg_gen_and_tl(tdest, tsrca, tsrcb);
470 mnemonic = "and";
471 break;
472 case OE_RRR(CMOVEQZ, 0, X0):
473 case OE_RRR(CMOVEQZ, 4, Y0):
474 case OE_RRR(CMOVNEZ, 0, X0):
475 case OE_RRR(CMOVNEZ, 4, Y0):
476 case OE_RRR(CMPEQ, 0, X0):
477 case OE_RRR(CMPEQ, 0, X1):
478 case OE_RRR(CMPEQ, 3, Y0):
479 case OE_RRR(CMPEQ, 3, Y1):
480 case OE_RRR(CMPEXCH4, 0, X1):
481 case OE_RRR(CMPEXCH, 0, X1):
482 case OE_RRR(CMPLES, 0, X0):
483 case OE_RRR(CMPLES, 0, X1):
484 case OE_RRR(CMPLES, 2, Y0):
485 case OE_RRR(CMPLES, 2, Y1):
486 case OE_RRR(CMPLEU, 0, X0):
487 case OE_RRR(CMPLEU, 0, X1):
488 case OE_RRR(CMPLEU, 2, Y0):
489 case OE_RRR(CMPLEU, 2, Y1):
490 case OE_RRR(CMPLTS, 0, X0):
491 case OE_RRR(CMPLTS, 0, X1):
492 case OE_RRR(CMPLTS, 2, Y0):
493 case OE_RRR(CMPLTS, 2, Y1):
494 case OE_RRR(CMPLTU, 0, X0):
495 case OE_RRR(CMPLTU, 0, X1):
496 case OE_RRR(CMPLTU, 2, Y0):
497 case OE_RRR(CMPLTU, 2, Y1):
498 case OE_RRR(CMPNE, 0, X0):
499 case OE_RRR(CMPNE, 0, X1):
500 case OE_RRR(CMPNE, 3, Y0):
501 case OE_RRR(CMPNE, 3, Y1):
502 case OE_RRR(CMULAF, 0, X0):
503 case OE_RRR(CMULA, 0, X0):
504 case OE_RRR(CMULFR, 0, X0):
505 case OE_RRR(CMULF, 0, X0):
506 case OE_RRR(CMULHR, 0, X0):
507 case OE_RRR(CMULH, 0, X0):
508 case OE_RRR(CMUL, 0, X0):
509 case OE_RRR(CRC32_32, 0, X0):
510 case OE_RRR(CRC32_8, 0, X0):
511 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
512 case OE_RRR(DBLALIGN2, 0, X0):
513 case OE_RRR(DBLALIGN2, 0, X1):
514 gen_dblaligni(tdest, tsrca, tsrcb, 16);
515 mnemonic = "dblalign2";
516 break;
517 case OE_RRR(DBLALIGN4, 0, X0):
518 case OE_RRR(DBLALIGN4, 0, X1):
519 gen_dblaligni(tdest, tsrca, tsrcb, 32);
520 mnemonic = "dblalign4";
521 break;
522 case OE_RRR(DBLALIGN6, 0, X0):
523 case OE_RRR(DBLALIGN6, 0, X1):
524 gen_dblaligni(tdest, tsrca, tsrcb, 48);
525 mnemonic = "dblalign6";
526 break;
527 case OE_RRR(DBLALIGN, 0, X0):
528 gen_dblalign(tdest, load_gr(dc, dest), tsrca, tsrcb);
529 mnemonic = "dblalign";
530 break;
531 case OE_RRR(EXCH4, 0, X1):
532 case OE_RRR(EXCH, 0, X1):
533 case OE_RRR(FDOUBLE_ADDSUB, 0, X0):
534 case OE_RRR(FDOUBLE_ADD_FLAGS, 0, X0):
535 case OE_RRR(FDOUBLE_MUL_FLAGS, 0, X0):
536 case OE_RRR(FDOUBLE_PACK1, 0, X0):
537 case OE_RRR(FDOUBLE_PACK2, 0, X0):
538 case OE_RRR(FDOUBLE_SUB_FLAGS, 0, X0):
539 case OE_RRR(FDOUBLE_UNPACK_MAX, 0, X0):
540 case OE_RRR(FDOUBLE_UNPACK_MIN, 0, X0):
541 case OE_RRR(FETCHADD4, 0, X1):
542 case OE_RRR(FETCHADDGEZ4, 0, X1):
543 case OE_RRR(FETCHADDGEZ, 0, X1):
544 case OE_RRR(FETCHADD, 0, X1):
545 case OE_RRR(FETCHAND4, 0, X1):
546 case OE_RRR(FETCHAND, 0, X1):
547 case OE_RRR(FETCHOR4, 0, X1):
548 case OE_RRR(FETCHOR, 0, X1):
549 case OE_RRR(FSINGLE_ADD1, 0, X0):
550 case OE_RRR(FSINGLE_ADDSUB2, 0, X0):
551 case OE_RRR(FSINGLE_MUL1, 0, X0):
552 case OE_RRR(FSINGLE_MUL2, 0, X0):
553 case OE_RRR(FSINGLE_PACK2, 0, X0):
554 case OE_RRR(FSINGLE_SUB1, 0, X0):
555 case OE_RRR(MNZ, 0, X0):
556 case OE_RRR(MNZ, 0, X1):
557 case OE_RRR(MNZ, 4, Y0):
558 case OE_RRR(MNZ, 4, Y1):
559 case OE_RRR(MULAX, 0, X0):
560 case OE_RRR(MULAX, 3, Y0):
561 case OE_RRR(MULA_HS_HS, 0, X0):
562 case OE_RRR(MULA_HS_HS, 9, Y0):
563 case OE_RRR(MULA_HS_HU, 0, X0):
564 case OE_RRR(MULA_HS_LS, 0, X0):
565 case OE_RRR(MULA_HS_LU, 0, X0):
566 case OE_RRR(MULA_HU_HU, 0, X0):
567 case OE_RRR(MULA_HU_HU, 9, Y0):
568 case OE_RRR(MULA_HU_LS, 0, X0):
569 case OE_RRR(MULA_HU_LU, 0, X0):
570 case OE_RRR(MULA_LS_LS, 0, X0):
571 case OE_RRR(MULA_LS_LS, 9, Y0):
572 case OE_RRR(MULA_LS_LU, 0, X0):
573 case OE_RRR(MULA_LU_LU, 0, X0):
574 case OE_RRR(MULA_LU_LU, 9, Y0):
575 case OE_RRR(MULX, 0, X0):
576 case OE_RRR(MULX, 3, Y0):
577 case OE_RRR(MUL_HS_HS, 0, X0):
578 case OE_RRR(MUL_HS_HS, 8, Y0):
579 case OE_RRR(MUL_HS_HU, 0, X0):
580 case OE_RRR(MUL_HS_LS, 0, X0):
581 case OE_RRR(MUL_HS_LU, 0, X0):
582 case OE_RRR(MUL_HU_HU, 0, X0):
583 case OE_RRR(MUL_HU_HU, 8, Y0):
584 case OE_RRR(MUL_HU_LS, 0, X0):
585 case OE_RRR(MUL_HU_LU, 0, X0):
586 case OE_RRR(MUL_LS_LS, 0, X0):
587 case OE_RRR(MUL_LS_LS, 8, Y0):
588 case OE_RRR(MUL_LS_LU, 0, X0):
589 case OE_RRR(MUL_LU_LU, 0, X0):
590 case OE_RRR(MUL_LU_LU, 8, Y0):
591 case OE_RRR(MZ, 0, X0):
592 case OE_RRR(MZ, 0, X1):
593 case OE_RRR(MZ, 4, Y0):
594 case OE_RRR(MZ, 4, Y1):
595 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
596 case OE_RRR(NOR, 0, X0):
597 case OE_RRR(NOR, 0, X1):
598 case OE_RRR(NOR, 5, Y0):
599 case OE_RRR(NOR, 5, Y1):
600 tcg_gen_nor_tl(tdest, tsrca, tsrcb);
601 mnemonic = "nor";
602 break;
603 case OE_RRR(OR, 0, X0):
604 case OE_RRR(OR, 0, X1):
605 case OE_RRR(OR, 5, Y0):
606 case OE_RRR(OR, 5, Y1):
607 tcg_gen_or_tl(tdest, tsrca, tsrcb);
608 mnemonic = "or";
609 break;
610 case OE_RRR(ROTL, 0, X0):
611 case OE_RRR(ROTL, 0, X1):
612 case OE_RRR(ROTL, 6, Y0):
613 case OE_RRR(ROTL, 6, Y1):
614 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
615 case OE_RRR(SHL1ADDX, 0, X0):
616 case OE_RRR(SHL1ADDX, 0, X1):
617 case OE_RRR(SHL1ADDX, 7, Y0):
618 case OE_RRR(SHL1ADDX, 7, Y1):
619 tcg_gen_shli_tl(tdest, tsrca, 1);
620 tcg_gen_add_tl(tdest, tdest, tsrcb);
621 tcg_gen_ext32s_tl(tdest, tdest);
622 mnemonic = "shl1addx";
623 break;
624 case OE_RRR(SHL1ADD, 0, X0):
625 case OE_RRR(SHL1ADD, 0, X1):
626 case OE_RRR(SHL1ADD, 1, Y0):
627 case OE_RRR(SHL1ADD, 1, Y1):
628 tcg_gen_shli_tl(tdest, tsrca, 1);
629 tcg_gen_add_tl(tdest, tdest, tsrcb);
630 mnemonic = "shl1add";
631 break;
632 case OE_RRR(SHL2ADDX, 0, X0):
633 case OE_RRR(SHL2ADDX, 0, X1):
634 case OE_RRR(SHL2ADDX, 7, Y0):
635 case OE_RRR(SHL2ADDX, 7, Y1):
636 tcg_gen_shli_tl(tdest, tsrca, 2);
637 tcg_gen_add_tl(tdest, tdest, tsrcb);
638 tcg_gen_ext32s_tl(tdest, tdest);
639 mnemonic = "shl2addx";
640 break;
641 case OE_RRR(SHL2ADD, 0, X0):
642 case OE_RRR(SHL2ADD, 0, X1):
643 case OE_RRR(SHL2ADD, 1, Y0):
644 case OE_RRR(SHL2ADD, 1, Y1):
645 tcg_gen_shli_tl(tdest, tsrca, 2);
646 tcg_gen_add_tl(tdest, tdest, tsrcb);
647 mnemonic = "shl2add";
648 break;
649 case OE_RRR(SHL3ADDX, 0, X0):
650 case OE_RRR(SHL3ADDX, 0, X1):
651 case OE_RRR(SHL3ADDX, 7, Y0):
652 case OE_RRR(SHL3ADDX, 7, Y1):
653 tcg_gen_shli_tl(tdest, tsrca, 3);
654 tcg_gen_add_tl(tdest, tdest, tsrcb);
655 tcg_gen_ext32s_tl(tdest, tdest);
656 mnemonic = "shl3addx";
657 break;
658 case OE_RRR(SHL3ADD, 0, X0):
659 case OE_RRR(SHL3ADD, 0, X1):
660 case OE_RRR(SHL3ADD, 1, Y0):
661 case OE_RRR(SHL3ADD, 1, Y1):
662 tcg_gen_shli_tl(tdest, tsrca, 3);
663 tcg_gen_add_tl(tdest, tdest, tsrcb);
664 mnemonic = "shl3add";
665 break;
666 case OE_RRR(SHLX, 0, X0):
667 case OE_RRR(SHLX, 0, X1):
668 case OE_RRR(SHL, 0, X0):
669 case OE_RRR(SHL, 0, X1):
670 case OE_RRR(SHL, 6, Y0):
671 case OE_RRR(SHL, 6, Y1):
672 case OE_RRR(SHRS, 0, X0):
673 case OE_RRR(SHRS, 0, X1):
674 case OE_RRR(SHRS, 6, Y0):
675 case OE_RRR(SHRS, 6, Y1):
676 case OE_RRR(SHRUX, 0, X0):
677 case OE_RRR(SHRUX, 0, X1):
678 case OE_RRR(SHRU, 0, X0):
679 case OE_RRR(SHRU, 0, X1):
680 case OE_RRR(SHRU, 6, Y0):
681 case OE_RRR(SHRU, 6, Y1):
682 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
683 case OE_RRR(SHUFFLEBYTES, 0, X0):
684 gen_helper_shufflebytes(tdest, load_gr(dc, dest), tsrca, tsrca);
685 mnemonic = "shufflebytes";
686 break;
687 case OE_RRR(SUBXSC, 0, X0):
688 case OE_RRR(SUBXSC, 0, X1):
689 gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_sub_tl);
690 mnemonic = "subxsc";
691 break;
692 case OE_RRR(SUBX, 0, X0):
693 case OE_RRR(SUBX, 0, X1):
694 case OE_RRR(SUBX, 0, Y0):
695 case OE_RRR(SUBX, 0, Y1):
696 tcg_gen_sub_tl(tdest, tsrca, tsrcb);
697 tcg_gen_ext32s_tl(tdest, tdest);
698 mnemonic = "subx";
699 break;
700 case OE_RRR(SUB, 0, X0):
701 case OE_RRR(SUB, 0, X1):
702 case OE_RRR(SUB, 0, Y0):
703 case OE_RRR(SUB, 0, Y1):
704 tcg_gen_sub_tl(tdest, tsrca, tsrcb);
705 mnemonic = "sub";
706 break;
707 case OE_RRR(V1ADDUC, 0, X0):
708 case OE_RRR(V1ADDUC, 0, X1):
709 case OE_RRR(V1ADD, 0, X0):
710 case OE_RRR(V1ADD, 0, X1):
711 case OE_RRR(V1ADIFFU, 0, X0):
712 case OE_RRR(V1AVGU, 0, X0):
713 case OE_RRR(V1CMPEQ, 0, X0):
714 case OE_RRR(V1CMPEQ, 0, X1):
715 case OE_RRR(V1CMPLES, 0, X0):
716 case OE_RRR(V1CMPLES, 0, X1):
717 case OE_RRR(V1CMPLEU, 0, X0):
718 case OE_RRR(V1CMPLEU, 0, X1):
719 case OE_RRR(V1CMPLTS, 0, X0):
720 case OE_RRR(V1CMPLTS, 0, X1):
721 case OE_RRR(V1CMPLTU, 0, X0):
722 case OE_RRR(V1CMPLTU, 0, X1):
723 case OE_RRR(V1CMPNE, 0, X0):
724 case OE_RRR(V1CMPNE, 0, X1):
725 case OE_RRR(V1DDOTPUA, 0, X0):
726 case OE_RRR(V1DDOTPUSA, 0, X0):
727 case OE_RRR(V1DDOTPUS, 0, X0):
728 case OE_RRR(V1DDOTPU, 0, X0):
729 case OE_RRR(V1DOTPA, 0, X0):
730 case OE_RRR(V1DOTPUA, 0, X0):
731 case OE_RRR(V1DOTPUSA, 0, X0):
732 case OE_RRR(V1DOTPUS, 0, X0):
733 case OE_RRR(V1DOTPU, 0, X0):
734 case OE_RRR(V1DOTP, 0, X0):
735 case OE_RRR(V1INT_H, 0, X0):
736 case OE_RRR(V1INT_H, 0, X1):
737 case OE_RRR(V1INT_L, 0, X0):
738 case OE_RRR(V1INT_L, 0, X1):
739 case OE_RRR(V1MAXU, 0, X0):
740 case OE_RRR(V1MAXU, 0, X1):
741 case OE_RRR(V1MINU, 0, X0):
742 case OE_RRR(V1MINU, 0, X1):
743 case OE_RRR(V1MNZ, 0, X0):
744 case OE_RRR(V1MNZ, 0, X1):
745 case OE_RRR(V1MULTU, 0, X0):
746 case OE_RRR(V1MULUS, 0, X0):
747 case OE_RRR(V1MULU, 0, X0):
748 case OE_RRR(V1MZ, 0, X0):
749 case OE_RRR(V1MZ, 0, X1):
750 case OE_RRR(V1SADAU, 0, X0):
751 case OE_RRR(V1SADU, 0, X0):
752 case OE_RRR(V1SHL, 0, X0):
753 case OE_RRR(V1SHL, 0, X1):
754 case OE_RRR(V1SHRS, 0, X0):
755 case OE_RRR(V1SHRS, 0, X1):
756 case OE_RRR(V1SHRU, 0, X0):
757 case OE_RRR(V1SHRU, 0, X1):
758 case OE_RRR(V1SUBUC, 0, X0):
759 case OE_RRR(V1SUBUC, 0, X1):
760 case OE_RRR(V1SUB, 0, X0):
761 case OE_RRR(V1SUB, 0, X1):
762 case OE_RRR(V2ADDSC, 0, X0):
763 case OE_RRR(V2ADDSC, 0, X1):
764 case OE_RRR(V2ADD, 0, X0):
765 case OE_RRR(V2ADD, 0, X1):
766 case OE_RRR(V2ADIFFS, 0, X0):
767 case OE_RRR(V2AVGS, 0, X0):
768 case OE_RRR(V2CMPEQ, 0, X0):
769 case OE_RRR(V2CMPEQ, 0, X1):
770 case OE_RRR(V2CMPLES, 0, X0):
771 case OE_RRR(V2CMPLES, 0, X1):
772 case OE_RRR(V2CMPLEU, 0, X0):
773 case OE_RRR(V2CMPLEU, 0, X1):
774 case OE_RRR(V2CMPLTS, 0, X0):
775 case OE_RRR(V2CMPLTS, 0, X1):
776 case OE_RRR(V2CMPLTU, 0, X0):
777 case OE_RRR(V2CMPLTU, 0, X1):
778 case OE_RRR(V2CMPNE, 0, X0):
779 case OE_RRR(V2CMPNE, 0, X1):
780 case OE_RRR(V2DOTPA, 0, X0):
781 case OE_RRR(V2DOTP, 0, X0):
782 case OE_RRR(V2INT_H, 0, X0):
783 case OE_RRR(V2INT_H, 0, X1):
784 case OE_RRR(V2INT_L, 0, X0):
785 case OE_RRR(V2INT_L, 0, X1):
786 case OE_RRR(V2MAXS, 0, X0):
787 case OE_RRR(V2MAXS, 0, X1):
788 case OE_RRR(V2MINS, 0, X0):
789 case OE_RRR(V2MINS, 0, X1):
790 case OE_RRR(V2MNZ, 0, X0):
791 case OE_RRR(V2MNZ, 0, X1):
792 case OE_RRR(V2MULFSC, 0, X0):
793 case OE_RRR(V2MULS, 0, X0):
794 case OE_RRR(V2MULTS, 0, X0):
795 case OE_RRR(V2MZ, 0, X0):
796 case OE_RRR(V2MZ, 0, X1):
797 case OE_RRR(V2PACKH, 0, X0):
798 case OE_RRR(V2PACKH, 0, X1):
799 case OE_RRR(V2PACKL, 0, X0):
800 case OE_RRR(V2PACKL, 0, X1):
801 case OE_RRR(V2PACKUC, 0, X0):
802 case OE_RRR(V2PACKUC, 0, X1):
803 case OE_RRR(V2SADAS, 0, X0):
804 case OE_RRR(V2SADAU, 0, X0):
805 case OE_RRR(V2SADS, 0, X0):
806 case OE_RRR(V2SADU, 0, X0):
807 case OE_RRR(V2SHLSC, 0, X0):
808 case OE_RRR(V2SHLSC, 0, X1):
809 case OE_RRR(V2SHL, 0, X0):
810 case OE_RRR(V2SHL, 0, X1):
811 case OE_RRR(V2SHRS, 0, X0):
812 case OE_RRR(V2SHRS, 0, X1):
813 case OE_RRR(V2SHRU, 0, X0):
814 case OE_RRR(V2SHRU, 0, X1):
815 case OE_RRR(V2SUBSC, 0, X0):
816 case OE_RRR(V2SUBSC, 0, X1):
817 case OE_RRR(V2SUB, 0, X0):
818 case OE_RRR(V2SUB, 0, X1):
819 case OE_RRR(V4ADDSC, 0, X0):
820 case OE_RRR(V4ADDSC, 0, X1):
821 case OE_RRR(V4ADD, 0, X0):
822 case OE_RRR(V4ADD, 0, X1):
823 case OE_RRR(V4INT_H, 0, X0):
824 case OE_RRR(V4INT_H, 0, X1):
825 case OE_RRR(V4INT_L, 0, X0):
826 case OE_RRR(V4INT_L, 0, X1):
827 case OE_RRR(V4PACKSC, 0, X0):
828 case OE_RRR(V4PACKSC, 0, X1):
829 case OE_RRR(V4SHLSC, 0, X0):
830 case OE_RRR(V4SHLSC, 0, X1):
831 case OE_RRR(V4SHL, 0, X0):
832 case OE_RRR(V4SHL, 0, X1):
833 case OE_RRR(V4SHRS, 0, X0):
834 case OE_RRR(V4SHRS, 0, X1):
835 case OE_RRR(V4SHRU, 0, X0):
836 case OE_RRR(V4SHRU, 0, X1):
837 case OE_RRR(V4SUBSC, 0, X0):
838 case OE_RRR(V4SUBSC, 0, X1):
839 case OE_RRR(V4SUB, 0, X0):
840 case OE_RRR(V4SUB, 0, X1):
841 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
842 case OE_RRR(XOR, 0, X0):
843 case OE_RRR(XOR, 0, X1):
844 case OE_RRR(XOR, 5, Y0):
845 case OE_RRR(XOR, 5, Y1):
846 tcg_gen_xor_tl(tdest, tsrca, tsrcb);
847 mnemonic = "xor";
848 break;
849 default:
850 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
853 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %s", mnemonic,
854 reg_names[dest], reg_names[srca], reg_names[srcb]);
855 return TILEGX_EXCP_NONE;
858 static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext,
859 unsigned dest, unsigned srca, int imm)
861 TCGv tdest = dest_gr(dc, dest);
862 TCGv tsrca = load_gr(dc, srca);
863 const char *mnemonic;
864 TCGMemOp memop;
866 switch (opext) {
867 case OE(ADDI_OPCODE_Y0, 0, Y0):
868 case OE(ADDI_OPCODE_Y1, 0, Y1):
869 case OE_IM(ADDI, X0):
870 case OE_IM(ADDI, X1):
871 tcg_gen_addi_tl(tdest, tsrca, imm);
872 mnemonic = "addi";
873 break;
874 case OE(ADDXI_OPCODE_Y0, 0, Y0):
875 case OE(ADDXI_OPCODE_Y1, 0, Y1):
876 case OE_IM(ADDXI, X0):
877 case OE_IM(ADDXI, X1):
878 tcg_gen_addi_tl(tdest, tsrca, imm);
879 tcg_gen_ext32s_tl(tdest, tdest);
880 mnemonic = "addxi";
881 break;
882 case OE(ANDI_OPCODE_Y0, 0, Y0):
883 case OE(ANDI_OPCODE_Y1, 0, Y1):
884 case OE_IM(ANDI, X0):
885 case OE_IM(ANDI, X1):
886 tcg_gen_andi_tl(tdest, tsrca, imm);
887 mnemonic = "andi";
888 break;
889 case OE_IM(CMPEQI, X0):
890 case OE_IM(CMPEQI, X1):
891 case OE_IM(CMPLTSI, X0):
892 case OE_IM(CMPLTSI, X1):
893 case OE_IM(CMPLTUI, X0):
894 case OE_IM(CMPLTUI, X1):
895 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
896 case OE_IM(LD1S_ADD, X1):
897 memop = MO_SB;
898 mnemonic = "ld1s_add";
899 goto do_load_add;
900 case OE_IM(LD1U_ADD, X1):
901 memop = MO_UB;
902 mnemonic = "ld1u_add";
903 goto do_load_add;
904 case OE_IM(LD2S_ADD, X1):
905 memop = MO_TESW;
906 mnemonic = "ld2s_add";
907 goto do_load_add;
908 case OE_IM(LD2U_ADD, X1):
909 memop = MO_TEUW;
910 mnemonic = "ld2u_add";
911 goto do_load_add;
912 case OE_IM(LD4S_ADD, X1):
913 memop = MO_TESL;
914 mnemonic = "ld4s_add";
915 goto do_load_add;
916 case OE_IM(LD4U_ADD, X1):
917 memop = MO_TEUL;
918 mnemonic = "ld4u_add";
919 goto do_load_add;
920 case OE_IM(LDNT1S_ADD, X1):
921 memop = MO_SB;
922 mnemonic = "ldnt1s_add";
923 goto do_load_add;
924 case OE_IM(LDNT1U_ADD, X1):
925 memop = MO_UB;
926 mnemonic = "ldnt1u_add";
927 goto do_load_add;
928 case OE_IM(LDNT2S_ADD, X1):
929 memop = MO_TESW;
930 mnemonic = "ldnt2s_add";
931 goto do_load_add;
932 case OE_IM(LDNT2U_ADD, X1):
933 memop = MO_TEUW;
934 mnemonic = "ldnt2u_add";
935 goto do_load_add;
936 case OE_IM(LDNT4S_ADD, X1):
937 memop = MO_TESL;
938 mnemonic = "ldnt4s_add";
939 goto do_load_add;
940 case OE_IM(LDNT4U_ADD, X1):
941 memop = MO_TEUL;
942 mnemonic = "ldnt4u_add";
943 goto do_load_add;
944 case OE_IM(LDNT_ADD, X1):
945 memop = MO_TEQ;
946 mnemonic = "ldnt_add";
947 goto do_load_add;
948 case OE_IM(LD_ADD, X1):
949 memop = MO_TEQ;
950 mnemonic = "ldnt_add";
951 do_load_add:
952 tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
953 tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
954 break;
955 case OE_IM(LDNA_ADD, X1):
956 tcg_gen_andi_tl(tdest, tsrca, ~7);
957 tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
958 tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
959 mnemonic = "ldna_add";
960 break;
961 case OE_IM(MFSPR, X1):
962 case OE_IM(MTSPR, X1):
963 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
964 case OE_IM(ORI, X0):
965 case OE_IM(ORI, X1):
966 tcg_gen_ori_tl(tdest, tsrca, imm);
967 mnemonic = "ori";
968 break;
969 case OE_IM(V1ADDI, X0):
970 case OE_IM(V1ADDI, X1):
971 case OE_IM(V1CMPEQI, X0):
972 case OE_IM(V1CMPEQI, X1):
973 case OE_IM(V1CMPLTSI, X0):
974 case OE_IM(V1CMPLTSI, X1):
975 case OE_IM(V1CMPLTUI, X0):
976 case OE_IM(V1CMPLTUI, X1):
977 case OE_IM(V1MAXUI, X0):
978 case OE_IM(V1MAXUI, X1):
979 case OE_IM(V1MINUI, X0):
980 case OE_IM(V1MINUI, X1):
981 case OE_IM(V2ADDI, X0):
982 case OE_IM(V2ADDI, X1):
983 case OE_IM(V2CMPEQI, X0):
984 case OE_IM(V2CMPEQI, X1):
985 case OE_IM(V2CMPLTSI, X0):
986 case OE_IM(V2CMPLTSI, X1):
987 case OE_IM(V2CMPLTUI, X0):
988 case OE_IM(V2CMPLTUI, X1):
989 case OE_IM(V2MAXSI, X0):
990 case OE_IM(V2MAXSI, X1):
991 case OE_IM(V2MINSI, X0):
992 case OE_IM(V2MINSI, X1):
993 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
994 case OE_IM(XORI, X0):
995 case OE_IM(XORI, X1):
996 tcg_gen_xori_tl(tdest, tsrca, imm);
997 mnemonic = "xori";
998 break;
1000 case OE_SH(ROTLI, X0):
1001 case OE_SH(ROTLI, X1):
1002 case OE_SH(ROTLI, Y0):
1003 case OE_SH(ROTLI, Y1):
1004 case OE_SH(SHLI, X0):
1005 case OE_SH(SHLI, X1):
1006 case OE_SH(SHLI, Y0):
1007 case OE_SH(SHLI, Y1):
1008 case OE_SH(SHLXI, X0):
1009 case OE_SH(SHLXI, X1):
1010 case OE_SH(SHRSI, X0):
1011 case OE_SH(SHRSI, X1):
1012 case OE_SH(SHRSI, Y0):
1013 case OE_SH(SHRSI, Y1):
1014 case OE_SH(SHRUI, X0):
1015 case OE_SH(SHRUI, X1):
1016 case OE_SH(SHRUI, Y0):
1017 case OE_SH(SHRUI, Y1):
1018 case OE_SH(SHRUXI, X0):
1019 case OE_SH(SHRUXI, X1):
1020 case OE_SH(V1SHLI, X0):
1021 case OE_SH(V1SHLI, X1):
1022 case OE_SH(V1SHRSI, X0):
1023 case OE_SH(V1SHRSI, X1):
1024 case OE_SH(V1SHRUI, X0):
1025 case OE_SH(V1SHRUI, X1):
1026 case OE_SH(V2SHLI, X0):
1027 case OE_SH(V2SHLI, X1):
1028 case OE_SH(V2SHRSI, X0):
1029 case OE_SH(V2SHRSI, X1):
1030 case OE_SH(V2SHRUI, X0):
1031 case OE_SH(V2SHRUI, X1):
1032 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1034 case OE(ADDLI_OPCODE_X0, 0, X0):
1035 case OE(ADDLI_OPCODE_X1, 0, X1):
1036 tcg_gen_addi_tl(tdest, tsrca, imm);
1037 mnemonic = "addli";
1038 break;
1039 case OE(ADDXLI_OPCODE_X0, 0, X0):
1040 case OE(ADDXLI_OPCODE_X1, 0, X1):
1041 tcg_gen_addi_tl(tdest, tsrca, imm);
1042 tcg_gen_ext32s_tl(tdest, tdest);
1043 mnemonic = "addxli";
1044 break;
1045 case OE(CMPEQI_OPCODE_Y0, 0, Y0):
1046 case OE(CMPEQI_OPCODE_Y1, 0, Y1):
1047 case OE(CMPLTSI_OPCODE_Y0, 0, Y0):
1048 case OE(CMPLTSI_OPCODE_Y1, 0, Y1):
1049 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1050 case OE(SHL16INSLI_OPCODE_X0, 0, X0):
1051 case OE(SHL16INSLI_OPCODE_X1, 0, X1):
1052 tcg_gen_shli_tl(tdest, tsrca, 16);
1053 tcg_gen_ori_tl(tdest, tdest, imm & 0xffff);
1054 mnemonic = "shl16insli";
1055 break;
1057 default:
1058 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1061 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", mnemonic,
1062 reg_names[dest], reg_names[srca], imm);
1063 return TILEGX_EXCP_NONE;
/* Translate an X0 bitfield instruction (extract/insert/masked-merge).
   All four operations are currently unimplemented; the logging tail
   below the switch is unreachable until one of them is filled in. */
static TileExcp gen_bf_opcode_x0(DisasContext *dc, unsigned ext,
                                 unsigned dest, unsigned srca,
                                 unsigned bfs, unsigned bfe)
{
    const char *mnemonic;

    switch (ext) {
    case BFEXTU_BF_OPCODE_X0:
    case BFEXTS_BF_OPCODE_X0:
    case BFINS_BF_OPCODE_X0:
    case MM_BF_OPCODE_X0:
    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %u, %u", mnemonic,
                  reg_names[dest], reg_names[srca], bfs, bfe);
    return TILEGX_EXCP_NONE;
}
/* Translate an X1 conditional branch.  SRCA is compared against zero;
   OFF is the signed bundle-count displacement from the current pc.
   Sets up dc->jmp; the actual branch is emitted at end-of-bundle. */
static TileExcp gen_branch_opcode_x1(DisasContext *dc, unsigned ext,
                                     unsigned srca, int off)
{
    target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
    const char *mnemonic;

    dc->jmp.dest = tcg_const_tl(tgt);
    dc->jmp.val1 = tcg_temp_new();
    tcg_gen_mov_tl(dc->jmp.val1, load_gr(dc, srca));

    /* Note that the "predict taken" opcodes have bit 0 clear.
       Therefore, fold the two cases together by setting bit 0. */
    switch (ext | 1) {
    case BEQZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_EQ;
        mnemonic = "beqz";
        break;
    case BNEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_NE;
        mnemonic = "bnez";
        break;
    case BGEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_GE;
        mnemonic = "bgez";
        break;
    case BGTZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_GT;
        mnemonic = "bgtz";
        break;
    case BLEZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_LE;
        mnemonic = "blez";
        break;
    case BLTZ_BRANCH_OPCODE_X1:
        dc->jmp.cond = TCG_COND_LT;
        mnemonic = "bltz";
        break;
    case BLBC_BRANCH_OPCODE_X1:
        /* Branch if low bit clear: reduce to (val & 1) == 0. */
        dc->jmp.cond = TCG_COND_EQ;
        tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
        mnemonic = "blbc";
        break;
    case BLBS_BRANCH_OPCODE_X1:
        /* Branch if low bit set: reduce to (val & 1) != 0. */
        dc->jmp.cond = TCG_COND_NE;
        tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
        mnemonic = "blbs";
        break;
    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        /* Predict-taken variants (bit 0 clear) disassemble with a "t"
           suffix. */
        qemu_log("%s%s %s, " TARGET_FMT_lx " <%s>",
                 mnemonic, ext & 1 ? "" : "t",
                 reg_names[srca], tgt, lookup_symbol(tgt));
    }
    return TILEGX_EXCP_NONE;
}
/*
 * Decode an unconditional jump (j/jal) from the X1 pipe.
 *
 * For jal, the return address (pc of the next bundle) is written to the
 * link register first.  The jump itself is recorded in dc->jmp with
 * TCG_COND_ALWAYS and performed by translate_one_bundle() at the end of
 * the bundle.
 */
static TileExcp gen_jump_opcode_x1(DisasContext *dc, unsigned ext, int off)
{
    /* Jump offsets are encoded in units of bundles, not bytes.  */
    target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
    const char *mnemonic = "j";

    /* The extension field is 1 bit, therefore we only have JAL and J. */
    if (ext == JAL_JUMP_OPCODE_X1) {
        tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
                        dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
        mnemonic = "jal";
    }
    dc->jmp.cond = TCG_COND_ALWAYS;
    dc->jmp.dest = tcg_const_tl(tgt);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("%s " TARGET_FMT_lx " <%s>",
                 mnemonic, tgt, lookup_symbol(tgt));
    }
    return TILEGX_EXCP_NONE;
}
/*
 * Decode the Y0 pipe of a Y-mode bundle and dispatch to the shared
 * RR/RRR/RRI generators.  Returns TILEGX_EXCP_NONE on success or an
 * exception code to be raised by the caller.
 */
static TileExcp decode_y0(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_Y0(bundle);
    unsigned ext = get_RRROpcodeExtension_Y0(bundle);
    unsigned dest = get_Dest_Y0(bundle);
    unsigned srca = get_SrcA_Y0(bundle);
    unsigned srcb;
    int imm;

    switch (opc) {
    case RRR_1_OPCODE_Y0:
        /* RRR_1 with the UNARY extension encodes two-operand insns;
           otherwise it is an ordinary three-operand RRR, handled by the
           fallthrough below.  */
        if (ext == UNARY_RRR_1_OPCODE_Y0) {
            ext = get_UnaryOpcodeExtension_Y0(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, Y0), dest, srca);
        }
        /* fallthru */
    case RRR_0_OPCODE_Y0:
    case RRR_2_OPCODE_Y0:
    case RRR_3_OPCODE_Y0:
    case RRR_4_OPCODE_Y0:
    case RRR_5_OPCODE_Y0:
    case RRR_6_OPCODE_Y0:
    case RRR_7_OPCODE_Y0:
    case RRR_8_OPCODE_Y0:
    case RRR_9_OPCODE_Y0:
        srcb = get_SrcB_Y0(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, Y0), dest, srca, srcb);

    case SHIFT_OPCODE_Y0:
        /* Shift-immediate: the shift amount rides in the ShAmt field.  */
        ext = get_ShiftOpcodeExtension_Y0(bundle);
        imm = get_ShAmt_Y0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, Y0), dest, srca, imm);

    case ADDI_OPCODE_Y0:
    case ADDXI_OPCODE_Y0:
    case ANDI_OPCODE_Y0:
    case CMPEQI_OPCODE_Y0:
    case CMPLTSI_OPCODE_Y0:
        /* 8-bit immediate, sign-extended.  */
        imm = (int8_t)get_Imm8_Y0(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, Y0), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
1212 static TileExcp decode_y1(DisasContext *dc, tilegx_bundle_bits bundle)
1214 unsigned opc = get_Opcode_Y1(bundle);
1215 unsigned ext = get_RRROpcodeExtension_Y1(bundle);
1216 unsigned dest = get_Dest_Y1(bundle);
1217 unsigned srca = get_SrcA_Y1(bundle);
1218 unsigned srcb;
1219 int imm;
1221 switch (get_Opcode_Y1(bundle)) {
1222 case RRR_1_OPCODE_Y1:
1223 if (ext == UNARY_RRR_1_OPCODE_Y0) {
1224 ext = get_UnaryOpcodeExtension_Y1(bundle);
1225 return gen_rr_opcode(dc, OE(opc, ext, Y1), dest, srca);
1227 /* fallthru */
1228 case RRR_0_OPCODE_Y1:
1229 case RRR_2_OPCODE_Y1:
1230 case RRR_3_OPCODE_Y1:
1231 case RRR_4_OPCODE_Y1:
1232 case RRR_5_OPCODE_Y1:
1233 case RRR_6_OPCODE_Y1:
1234 case RRR_7_OPCODE_Y1:
1235 srcb = get_SrcB_Y1(bundle);
1236 return gen_rrr_opcode(dc, OE(opc, ext, Y1), dest, srca, srcb);
1238 case SHIFT_OPCODE_Y1:
1239 ext = get_ShiftOpcodeExtension_Y1(bundle);
1240 imm = get_ShAmt_Y1(bundle);
1241 return gen_rri_opcode(dc, OE(opc, ext, Y1), dest, srca, imm);
1243 case ADDI_OPCODE_Y1:
1244 case ADDXI_OPCODE_Y1:
1245 case ANDI_OPCODE_Y1:
1246 case CMPEQI_OPCODE_Y1:
1247 case CMPLTSI_OPCODE_Y1:
1248 imm = (int8_t)get_Imm8_Y1(bundle);
1249 return gen_rri_opcode(dc, OE(opc, 0, Y1), dest, srca, imm);
1251 default:
1252 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
/*
 * Decode the Y2 pipe of a Y-mode bundle.  Y2 only holds memory
 * operations; the (opcode, mode) pair selects the insn, the loads are
 * generated inline here and the stores delegate to gen_st_opcode().
 * For loads, SrcBDest is the destination register; for stores it is
 * the value to be stored.
 */
static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned mode = get_Mode(bundle);
    unsigned opc = get_Opcode_Y2(bundle);
    unsigned srca = get_SrcA_Y2(bundle);
    unsigned srcbdest = get_SrcBDest_Y2(bundle);
    const char *mnemonic;
    TCGMemOp memop;

    switch (OEY2(opc, mode)) {
    case OEY2(LD1S_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_SB;
        mnemonic = "ld1s";
        goto do_load;
    case OEY2(LD1U_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_UB;
        mnemonic = "ld1u";
        goto do_load;
    case OEY2(LD2S_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_TESW;
        mnemonic = "ld2s";
        goto do_load;
    case OEY2(LD2U_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_TEUW;
        mnemonic = "ld2u";
        goto do_load;
    case OEY2(LD4S_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TESL;
        mnemonic = "ld4s";
        goto do_load;
    case OEY2(LD4U_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TEUL;
        mnemonic = "ld4u";
        goto do_load;
    case OEY2(LD_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TEQ;
        mnemonic = "ld";
    do_load:
        /* All loads share this tail: load [srca] into srcbdest with the
           memop selected above.  */
        tcg_gen_qemu_ld_tl(dest_gr(dc, srcbdest), load_gr(dc, srca),
                           dc->mmuidx, memop);
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
                      reg_names[srcbdest], reg_names[srca]);
        return TILEGX_EXCP_NONE;

    case OEY2(ST1_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_UB, "st1");
    case OEY2(ST2_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUW, "st2");
    case OEY2(ST4_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUL, "st4");
    case OEY2(ST_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEQ, "st");

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
/*
 * Decode the X0 pipe of an X-mode bundle and dispatch to the shared
 * RR/RRR/RRI/BF generators.  Returns TILEGX_EXCP_NONE on success or an
 * exception code to be raised by the caller.
 */
static TileExcp decode_x0(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_X0(bundle);
    unsigned dest = get_Dest_X0(bundle);
    unsigned srca = get_SrcA_X0(bundle);
    unsigned ext, srcb, bfs, bfe;
    int imm;

    switch (opc) {
    case RRR_0_OPCODE_X0:
        ext = get_RRROpcodeExtension_X0(bundle);
        /* The UNARY extension encodes two-operand insns; everything
           else is a three-operand RRR.  */
        if (ext == UNARY_RRR_0_OPCODE_X0) {
            ext = get_UnaryOpcodeExtension_X0(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, X0), dest, srca);
        }
        srcb = get_SrcB_X0(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, X0), dest, srca, srcb);

    case SHIFT_OPCODE_X0:
        /* Shift-immediate: the shift amount rides in the ShAmt field.  */
        ext = get_ShiftOpcodeExtension_X0(bundle);
        imm = get_ShAmt_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);

    case IMM8_OPCODE_X0:
        /* 8-bit immediate, sign-extended.  */
        ext = get_Imm8OpcodeExtension_X0(bundle);
        imm = (int8_t)get_Imm8_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);

    case BF_OPCODE_X0:
        /* Bit-field insns carry explicit start/end bit positions.  */
        ext = get_BFOpcodeExtension_X0(bundle);
        bfs = get_BFStart_X0(bundle);
        bfe = get_BFEnd_X0(bundle);
        return gen_bf_opcode_x0(dc, ext, dest, srca, bfs, bfe);

    case ADDLI_OPCODE_X0:
    case SHL16INSLI_OPCODE_X0:
    case ADDXLI_OPCODE_X0:
        /* 16-bit immediate, sign-extended.  */
        imm = (int16_t)get_Imm16_X0(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, X0), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
/*
 * Decode the X1 pipe of an X-mode bundle.  X1 holds the control-flow
 * (branch/jump) and memory-store encodings in addition to the shared
 * RR/RRR/RRI forms; stores are picked out of the inner switches before
 * falling through to the generic generators.
 */
static TileExcp decode_x1(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_X1(bundle);
    unsigned dest = get_Dest_X1(bundle);
    unsigned srca = get_SrcA_X1(bundle);
    unsigned ext, srcb;
    int imm;

    switch (opc) {
    case RRR_0_OPCODE_X1:
        ext = get_RRROpcodeExtension_X1(bundle);
        srcb = get_SrcB_X1(bundle);
        switch (ext) {
        case UNARY_RRR_0_OPCODE_X1:
            /* Two-operand insns hide under the UNARY extension.  */
            ext = get_UnaryOpcodeExtension_X1(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, X1), dest, srca);
        case ST1_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "st1");
        case ST2_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "st2");
        case ST4_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "st4");
        case STNT1_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "stnt1");
        case STNT2_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "stnt2");
        case STNT4_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "stnt4");
        case STNT_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "stnt");
        case ST_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "st");
        }
        /* Not a store or unary: generic three-operand RRR.  */
        return gen_rrr_opcode(dc, OE(opc, ext, X1), dest, srca, srcb);

    case SHIFT_OPCODE_X1:
        /* Shift-immediate: the shift amount rides in the ShAmt field.  */
        ext = get_ShiftOpcodeExtension_X1(bundle);
        imm = get_ShAmt_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);

    case IMM8_OPCODE_X1:
        ext = get_Imm8OpcodeExtension_X1(bundle);
        /* The st*_add forms reuse the Dest field as their immediate.  */
        imm = (int8_t)get_Dest_Imm8_X1(bundle);
        srcb = get_SrcB_X1(bundle);
        switch (ext) {
        case ST1_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "st1_add");
        case ST2_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "st2_add");
        case ST4_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "st4_add");
        case STNT1_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "stnt1_add");
        case STNT2_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "stnt2_add");
        case STNT4_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "stnt4_add");
        case STNT_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "stnt_add");
        case ST_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "st_add");
        }
        /* Not a store: re-read the ordinary Imm8 field and dispatch.  */
        imm = (int8_t)get_Imm8_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);

    case BRANCH_OPCODE_X1:
        /* 17-bit signed bundle offset.  */
        ext = get_BrType_X1(bundle);
        imm = sextract32(get_BrOff_X1(bundle), 0, 17);
        return gen_branch_opcode_x1(dc, ext, srca, imm);

    case JUMP_OPCODE_X1:
        /* 27-bit signed bundle offset.  */
        ext = get_JumpOpcodeExtension_X1(bundle);
        imm = sextract32(get_JumpOff_X1(bundle), 0, 27);
        return gen_jump_opcode_x1(dc, ext, imm);

    case ADDLI_OPCODE_X1:
    case SHL16INSLI_OPCODE_X1:
    case ADDXLI_OPCODE_X1:
        /* 16-bit immediate, sign-extended.  */
        imm = (int16_t)get_Imm16_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, X1), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
1445 static void notice_excp(DisasContext *dc, uint64_t bundle,
1446 const char *type, TileExcp excp)
1448 if (likely(excp == TILEGX_EXCP_NONE)) {
1449 return;
1451 gen_exception(dc, excp);
1452 if (excp == TILEGX_EXCP_OPCODE_UNIMPLEMENTED) {
1453 qemu_log_mask(LOG_UNIMP, "UNIMP %s, [" FMT64X "]\n", type, bundle);
/*
 * Translate one 8-byte bundle.
 *
 * Register writes performed by the per-pipe decoders are buffered in
 * dc->wb and only committed to the global cpu_regs after all pipes have
 * been decoded, giving the architectural "all reads before all writes"
 * semantics within a bundle.  Any branch recorded in dc->jmp is likewise
 * resolved at the end of the bundle.
 */
static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
{
    int i;

    /* Reset the writeback buffer for this bundle.  */
    for (i = 0; i < ARRAY_SIZE(dc->wb); i++) {
        DisasContextTemp *wb = &dc->wb[i];
        wb->reg = TILEGX_R_NOREG;
        TCGV_UNUSED_I64(wb->val);
    }
    dc->num_wb = 0;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    /* Bit 62/63 of the bundle (Mode) selects Y-mode (three short pipes)
       vs X-mode (two wide pipes).  */
    qemu_log_mask(CPU_LOG_TB_IN_ASM, " %" PRIx64 ": { ", dc->pc);
    if (get_Mode(bundle)) {
        notice_excp(dc, bundle, "y0", decode_y0(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "y1", decode_y1(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "y2", decode_y2(dc, bundle));
    } else {
        notice_excp(dc, bundle, "x0", decode_x0(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "x1", decode_x1(dc, bundle));
    }
    qemu_log_mask(CPU_LOG_TB_IN_ASM, " }\n");

    /* Commit buffered writes to the architectural registers.  Entries
       with reg >= TILEGX_R_COUNT (e.g. the zero register) are dropped,
       but their temporaries are still freed.  */
    for (i = dc->num_wb - 1; i >= 0; --i) {
        DisasContextTemp *wb = &dc->wb[i];
        if (wb->reg < TILEGX_R_COUNT) {
            tcg_gen_mov_i64(cpu_regs[wb->reg], wb->val);
        }
        tcg_temp_free_i64(wb->val);
    }

    /* Resolve any branch recorded by the decoders for this bundle.  */
    if (dc->jmp.cond != TCG_COND_NEVER) {
        if (dc->jmp.cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(cpu_pc, dc->jmp.dest);
        } else {
            /* Conditional: select between target and fall-through by
               comparing jmp.val1 against zero with jmp.cond.  */
            TCGv next = tcg_const_i64(dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
            tcg_gen_movcond_i64(dc->jmp.cond, cpu_pc,
                                dc->jmp.val1, load_zero(dc),
                                dc->jmp.dest, next);
            tcg_temp_free_i64(dc->jmp.val1);
            tcg_temp_free_i64(next);
        }
        tcg_temp_free_i64(dc->jmp.dest);
        tcg_gen_exit_tb(0);
        dc->exit_tb = true;
    }
}
/*
 * Translate a block of bundles starting at tb->pc into TCG ops.
 *
 * When search_pc is true, also record per-op pc/icount information in
 * tcg_ctx.gen_opc_* so that restore_state_to_opc() can map a host pc
 * back to a guest pc.  Translation stops at the first bundle that ends
 * the TB (branch/exception), at the max insn count, at a page boundary,
 * or when the op buffer fills up.
 */
static inline void gen_intermediate_code_internal(TileGXCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    DisasContext ctx;
    DisasContext *dc = &ctx;
    CPUState *cs = CPU(cpu);
    CPUTLGState *env = &cpu->env;
    uint64_t pc_start = tb->pc;
    uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    int j, lj = -1;
    int num_insns = 0;
    int max_insns = tb->cflags & CF_COUNT_MASK;

    dc->pc = pc_start;
    dc->mmuidx = 0;
    dc->exit_tb = false;
    dc->jmp.cond = TCG_COND_NEVER;
    TCGV_UNUSED_I64(dc->jmp.dest);
    TCGV_UNUSED_I64(dc->jmp.val1);
    TCGV_UNUSED_I64(dc->zero);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
    }
    if (!max_insns) {
        max_insns = CF_COUNT_MASK;
    }
    if (cs->singlestep_enabled || singlestep) {
        /* Single-stepping: translate exactly one bundle per TB.  */
        max_insns = 1;
    }
    gen_tb_start(tb);

    while (1) {
        if (search_pc) {
            /* Record the mapping from op index to guest pc, padding any
               gap since the previous bundle with zeros.  */
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        translate_one_bundle(dc, cpu_ldq_data(env, dc->pc));

        if (dc->exit_tb) {
            /* PC updated and EXIT_TB/GOTO_TB/exception emitted. */
            break;
        }
        dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES;
        if (++num_insns >= max_insns
            || dc->pc >= next_page_start
            || tcg_op_buf_full()) {
            /* Ending the TB due to TB size or page boundary. Set PC. */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            tcg_gen_exit_tb(0);
            break;
        }
    }

    gen_tb_end(tb, num_insns);
    if (search_pc) {
        /* Zero-fill the pc-search table out to the final op count.  */
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
}
/* Public entry point: translate a TB without pc-search bookkeeping.  */
void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, false);
}
/* Public entry point: translate a TB recording pc-search information
   (used when restoring guest state from a host fault).  */
void gen_intermediate_code_pc(CPUTLGState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, true);
}
/* Restore the guest pc from the op-index table built during a
   search_pc translation (see gen_intermediate_code_internal).  */
void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}
1604 void tilegx_tcg_init(void)
1606 int i;
1608 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1609 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUTLGState, pc), "pc");
1610 for (i = 0; i < TILEGX_R_COUNT; i++) {
1611 cpu_regs[i] = tcg_global_mem_new_i64(TCG_AREG0,
1612 offsetof(CPUTLGState, regs[i]),
1613 reg_names[i]);