target-tilegx: Handle comparison instructions
[qemu/ar7.git] / target-tilegx / translate.c
blobf2dd8244672c3c1fa2a170c8cb27c6216f594702
1 /*
2 * QEMU TILE-Gx CPU
4 * Copyright (c) 2015 Chen Gang
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see
18 * <http://www.gnu.org/licenses/lgpl-2.1.html>
21 #include "cpu.h"
22 #include "qemu/log.h"
23 #include "disas/disas.h"
24 #include "tcg-op.h"
25 #include "exec/cpu_ldst.h"
26 #include "opcode_tilegx.h"
/* printf format for a zero-padded 64-bit hex value.  */
#define FMT64X          "%016" PRIx64

/* TCG globals mapping the architectural state.  */
static TCGv_ptr cpu_env;                /* pointer to the CPU state struct */
static TCGv cpu_pc;                     /* program counter */
static TCGv cpu_regs[TILEGX_R_COUNT];   /* general registers */
/* Disassembly names for all 64 register encodings.  Encodings 0-55 are
   the general registers (52-55 have ABI names); 56-63 are the special
   network/zero pseudo-registers.
   Bug fix: encoding 62 is UDN3, but the table previously listed "udn2"
   twice and never named "udn3".  */
static const char * const reg_names[64] = {
     "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
     "r8",  "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
    "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
    "r48", "r49", "r50", "r51",  "bp",  "tp",  "sp",  "lr",
    "sn", "idn0", "idn1", "udn0", "udn1", "udn2", "udn3", "zero"
};
/* Modified registers are cached in temporaries until the end of the bundle. */
typedef struct {
    unsigned reg;       /* register number to be written back */
    TCGv val;           /* temporary holding the value to write */
} DisasContextTemp;
/* Maximum number of buffered register writebacks per bundle
   (presumably bounded by the number of pipes -- TODO confirm). */
#define MAX_WRITEBACK 4

/* This is the state at translation time. */
typedef struct {
    uint64_t pc;        /* Current pc */

    TCGv zero;          /* For zero register */

    DisasContextTemp wb[MAX_WRITEBACK]; /* writebacks pending for this bundle */
    int num_wb;         /* number of valid entries in wb[] */
    int mmuidx;         /* MMU index for memory accesses */
    bool exit_tb;       /* stop translating after the current bundle */

    struct {
        TCGCond cond;   /* branch condition */
        TCGv dest;      /* branch destination */
        TCGv val1;      /* value to be compared against zero, for cond */
    } jmp;              /* Jump object, only once in each TB block */
} DisasContext;
71 #include "exec/gen-icount.h"
/* Differentiate the various pipe encodings. */
#define TY_X0   0
#define TY_X1   1
#define TY_Y0   2
#define TY_Y1   3

/* Remerge the base opcode and extension fields for switching.
   The X opcode fields are 3 bits; Y0/Y1 opcode fields are 4 bits;
   Y2 opcode field is 2 bits.  The packing below keeps every
   (opcode, extension, pipe) triple distinct. */
#define OE(OP, EXT, XY) (TY_##XY + OP * 4 + EXT * 64)

/* Similar, but for Y2 only. */
#define OEY2(OP, MODE) (OP + MODE * 4)

/* Similar, but make sure opcode names match up. */
#define OE_RR_X0(E)    OE(RRR_0_OPCODE_X0, E##_UNARY_OPCODE_X0, X0)
#define OE_RR_X1(E)    OE(RRR_0_OPCODE_X1, E##_UNARY_OPCODE_X1, X1)
#define OE_RR_Y0(E)    OE(RRR_1_OPCODE_Y0, E##_UNARY_OPCODE_Y0, Y0)
#define OE_RR_Y1(E)    OE(RRR_1_OPCODE_Y1, E##_UNARY_OPCODE_Y1, Y1)
#define OE_RRR(E,N,XY) OE(RRR_##N##_OPCODE_##XY, E##_RRR_##N##_OPCODE_##XY, XY)
#define OE_IM(E,XY)    OE(IMM8_OPCODE_##XY, E##_IMM8_OPCODE_##XY, XY)
#define OE_SH(E,XY)    OE(SHIFT_OPCODE_##XY, E##_SHIFT_OPCODE_##XY, XY)
/* Emit code raising exception NUM at the end of the current bundle.
   The pc is advanced past this bundle first, and translation of the
   current TB stops after the bundle completes.  */
static void gen_exception(DisasContext *dc, TileExcp num)
{
    TCGv_i32 tmp;

    tcg_gen_movi_tl(cpu_pc, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);

    tmp = tcg_const_i32(num);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->exit_tb = true;
}
/* Return true if REG is an ordinary general register.  For the special
   encodings, queue an access exception where the architecture requires
   one (idn*/udn*) and return false so the caller substitutes the zero
   register (reads) or discards the result (writes).  */
static bool check_gr(DisasContext *dc, uint8_t reg)
{
    if (likely(reg < TILEGX_R_COUNT)) {
        return true;
    }

    switch (reg) {
    case TILEGX_R_SN:
    case TILEGX_R_ZERO:
        /* Reads as zero / writes discarded; no exception.  */
        break;
    case TILEGX_R_IDN0:
    case TILEGX_R_IDN1:
        gen_exception(dc, TILEGX_EXCP_REG_IDN_ACCESS);
        break;
    case TILEGX_R_UDN0:
    case TILEGX_R_UDN1:
    case TILEGX_R_UDN2:
    case TILEGX_R_UDN3:
        gen_exception(dc, TILEGX_EXCP_REG_UDN_ACCESS);
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}
135 static TCGv load_zero(DisasContext *dc)
137 if (TCGV_IS_UNUSED_I64(dc->zero)) {
138 dc->zero = tcg_const_i64(0);
140 return dc->zero;
143 static TCGv load_gr(DisasContext *dc, unsigned reg)
145 if (check_gr(dc, reg)) {
146 return cpu_regs[reg];
148 return load_zero(dc);
151 static TCGv dest_gr(DisasContext *dc, unsigned reg)
153 int n;
155 /* Skip the result, mark the exception if necessary, and continue */
156 check_gr(dc, reg);
158 n = dc->num_wb++;
159 dc->wb[n].reg = reg;
160 return dc->wb[n].val = tcg_temp_new_i64();
/* Apply OPERATE to the sign-extended low 32 bits of TSRCA and TSRCB,
   then saturate the result into the signed 32-bit range, leaving the
   (sign-extended) result in TDEST.  */
static void gen_saturate_op(TCGv tdest, TCGv tsrca, TCGv tsrcb,
                            void (*operate)(TCGv, TCGv, TCGv))
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32s_tl(tdest, tsrca);
    tcg_gen_ext32s_tl(t0, tsrcb);
    operate(tdest, tdest, t0);

    /* Clamp from above to INT32_MAX, then from below to INT32_MIN.  */
    tcg_gen_movi_tl(t0, 0x7fffffff);
    tcg_gen_movcond_tl(TCG_COND_GT, tdest, tdest, t0, t0, tdest);
    tcg_gen_movi_tl(t0, -0x80000000LL);
    tcg_gen_movcond_tl(TCG_COND_LT, tdest, tdest, t0, t0, tdest);

    tcg_temp_free(t0);
}
/* Shift the 128-bit value TSRCA:TSRCD right by the number of bytes
   specified by the bottom 3 bits of TSRCB, and set TDEST to the
   low 64 bits of the resulting value. */
static void gen_dblalign(TCGv tdest, TCGv tsrcd, TCGv tsrca, TCGv tsrcb)
{
    TCGv t0 = tcg_temp_new();

    /* Byte count -> bit count, then shift the low half right.  */
    tcg_gen_andi_tl(t0, tsrcb, 7);
    tcg_gen_shli_tl(t0, t0, 3);
    tcg_gen_shr_tl(tdest, tsrcd, t0);

    /* We want to do "t0 = tsrca << (64 - t0)".  Two's complement
       arithmetic on a 6-bit field tells us that 64 - t0 is equal
       to (t0 ^ 63) + 1.  So we can do the shift in two parts,
       neither of which will be an invalid shift by 64.  */
    tcg_gen_xori_tl(t0, t0, 63);
    tcg_gen_shl_tl(t0, tsrca, t0);
    tcg_gen_shli_tl(t0, t0, 1);
    tcg_gen_or_tl(tdest, tdest, t0);

    tcg_temp_free(t0);
}
/* Similarly, except that the 128-bit value is TSRCA:TSRCB, and the
   right shift is an immediate.
   NOTE(review): SHR must be in 1..63 -- a value of 0 would make the
   left shift "64 - shr" undefined.  All current callers pass 16/32/48. */
static void gen_dblaligni(TCGv tdest, TCGv tsrca, TCGv tsrcb, int shr)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_shri_tl(t0, tsrcb, shr);
    tcg_gen_shli_tl(tdest, tsrca, 64 - shr);
    tcg_gen_or_tl(tdest, tdest, t0);

    tcg_temp_free(t0);
}
/* Translate a plain register store: mem[SRCA] = SRCB with size/endianness
   MEMOP.  The DEST field has no meaning for stores and must be zero.
   Returns the exception to raise, or TILEGX_EXCP_NONE.  */
static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca,
                              unsigned srcb, TCGMemOp memop, const char *name)
{
    if (dest) {
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    tcg_gen_qemu_st_tl(load_gr(dc, srcb), load_gr(dc, srca),
                       dc->mmuidx, memop);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", name,
                  reg_names[srca], reg_names[srcb]);
    return TILEGX_EXCP_NONE;
}
/* Translate a store-with-postincrement: mem[SRCA] = SRCB, then
   SRCA += IMM.  Returns the exception to raise, or TILEGX_EXCP_NONE.  */
static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb,
                                  int imm, TCGMemOp memop, const char *name)
{
    TCGv tsrca = load_gr(dc, srca);
    TCGv tsrcb = load_gr(dc, srcb);

    tcg_gen_qemu_st_tl(tsrcb, tsrca, dc->mmuidx, memop);
    /* Writeback of the incremented address is deferred to bundle end.  */
    tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", name,
                  reg_names[srca], reg_names[srcb], imm);
    return TILEGX_EXCP_NONE;
}
/* Translate a unary (one source register) opcode from any pipe.
   OPEXT is the remerged opcode/extension (see the OE_RR_* macros);
   DEST and SRCA are the encoded register operands.  Returns the
   exception to raise, or TILEGX_EXCP_NONE on success.  */
static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext,
                              unsigned dest, unsigned srca)
{
    TCGv tdest, tsrca;
    const char *mnemonic;
    TCGMemOp memop;

    /* Eliminate nops and jumps before doing anything else. */
    switch (opext) {
    case OE_RR_Y0(NOP):
    case OE_RR_Y1(NOP):
    case OE_RR_X0(NOP):
    case OE_RR_X1(NOP):
        mnemonic = "nop";
        goto do_nop;
    case OE_RR_Y0(FNOP):
    case OE_RR_Y1(FNOP):
    case OE_RR_X0(FNOP):
    case OE_RR_X1(FNOP):
        mnemonic = "fnop";
    do_nop:
        if (srca || dest) {
            return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
        return TILEGX_EXCP_NONE;

    case OE_RR_X1(JRP):
    case OE_RR_Y1(JRP):
        mnemonic = "jrp";
        goto do_jr;
    case OE_RR_X1(JR):
    case OE_RR_Y1(JR):
        mnemonic = "jr";
        goto do_jr;
    case OE_RR_X1(JALRP):
    case OE_RR_Y1(JALRP):
        mnemonic = "jalrp";
        goto do_jalr;
    case OE_RR_X1(JALR):
    case OE_RR_Y1(JALR):
        mnemonic = "jalr";
    do_jalr:
        /* Link register receives the address of the next bundle.  */
        tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
                        dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
    do_jr:
        dc->jmp.cond = TCG_COND_ALWAYS;
        dc->jmp.dest = tcg_temp_new();
        /* Bundle addresses are 8-byte aligned; mask off the low bits.  */
        tcg_gen_andi_tl(dc->jmp.dest, load_gr(dc, srca), ~7);
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s", mnemonic, reg_names[srca]);
        return TILEGX_EXCP_NONE;
    }

    tdest = dest_gr(dc, dest);
    tsrca = load_gr(dc, srca);

    switch (opext) {
    case OE_RR_X0(CNTLZ):
    case OE_RR_Y0(CNTLZ):
        gen_helper_cntlz(tdest, tsrca);
        mnemonic = "cntlz";
        break;
    case OE_RR_X0(CNTTZ):
    case OE_RR_Y0(CNTTZ):
        gen_helper_cnttz(tdest, tsrca);
        mnemonic = "cnttz";
        break;
    case OE_RR_X1(DRAIN):
    case OE_RR_X1(DTLBPR):
    case OE_RR_X1(FINV):
    case OE_RR_X1(FLUSHWB):
    case OE_RR_X1(FLUSH):
    case OE_RR_X0(FSINGLE_PACK1):
    case OE_RR_Y0(FSINGLE_PACK1):
    case OE_RR_X1(ICOH):
    case OE_RR_X1(ILL):
    case OE_RR_Y1(ILL):
    case OE_RR_X1(INV):
    case OE_RR_X1(IRET):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RR_X1(LD1S):
        memop = MO_SB;
        mnemonic = "ld1s";
        goto do_load;
    case OE_RR_X1(LD1U):
        memop = MO_UB;
        mnemonic = "ld1u";
        goto do_load;
    case OE_RR_X1(LD2S):
        memop = MO_TESW;
        mnemonic = "ld2s";
        goto do_load;
    case OE_RR_X1(LD2U):
        memop = MO_TEUW;
        mnemonic = "ld2u";
        goto do_load;
    case OE_RR_X1(LD4S):
        memop = MO_TESL;
        mnemonic = "ld4s";
        goto do_load;
    case OE_RR_X1(LD4U):
        memop = MO_TEUL;
        mnemonic = "ld4u";
        goto do_load;
    /* The non-temporal loads are modeled as ordinary loads.  */
    case OE_RR_X1(LDNT1S):
        memop = MO_SB;
        mnemonic = "ldnt1s";
        goto do_load;
    case OE_RR_X1(LDNT1U):
        memop = MO_UB;
        mnemonic = "ldnt1u";
        goto do_load;
    case OE_RR_X1(LDNT2S):
        memop = MO_TESW;
        mnemonic = "ldnt2s";
        goto do_load;
    case OE_RR_X1(LDNT2U):
        memop = MO_TEUW;
        mnemonic = "ldnt2u";
        goto do_load;
    case OE_RR_X1(LDNT4S):
        memop = MO_TESL;
        mnemonic = "ldnt4s";
        goto do_load;
    case OE_RR_X1(LDNT4U):
        memop = MO_TEUL;
        mnemonic = "ldnt4u";
        goto do_load;
    case OE_RR_X1(LDNT):
        memop = MO_TEQ;
        mnemonic = "ldnt";
        goto do_load;
    case OE_RR_X1(LD):
        memop = MO_TEQ;
        mnemonic = "ld";
    do_load:
        tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
        break;
    case OE_RR_X1(LDNA):
        /* Load no-alignment: force the address down to 8-byte alignment.  */
        tcg_gen_andi_tl(tdest, tsrca, ~7);
        tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
        mnemonic = "ldna";
        break;
    case OE_RR_X1(LNK):
    case OE_RR_Y1(LNK):
        if (srca) {
            return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
        }
        tcg_gen_movi_tl(tdest, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
        mnemonic = "lnk";
        break;
    case OE_RR_X1(MF):
    case OE_RR_X1(NAP):
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    case OE_RR_X0(PCNT):
    case OE_RR_Y0(PCNT):
        gen_helper_pcnt(tdest, tsrca);
        mnemonic = "pcnt";
        break;
    case OE_RR_X0(REVBITS):
    case OE_RR_Y0(REVBITS):
        gen_helper_revbits(tdest, tsrca);
        mnemonic = "revbits";
        break;
    case OE_RR_X0(REVBYTES):
    case OE_RR_Y0(REVBYTES):
        tcg_gen_bswap64_tl(tdest, tsrca);
        mnemonic = "revbytes";
        break;
    case OE_RR_X1(SWINT0):
    case OE_RR_X1(SWINT1):
    case OE_RR_X1(SWINT2):
    case OE_RR_X1(SWINT3):
    case OE_RR_X0(TBLIDXB0):
    case OE_RR_Y0(TBLIDXB0):
    case OE_RR_X0(TBLIDXB1):
    case OE_RR_Y0(TBLIDXB1):
    case OE_RR_X0(TBLIDXB2):
    case OE_RR_Y0(TBLIDXB2):
    case OE_RR_X0(TBLIDXB3):
    case OE_RR_Y0(TBLIDXB3):
    case OE_RR_X1(WH64):
    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
                  reg_names[dest], reg_names[srca]);
    return TILEGX_EXCP_NONE;
}
436 static TileExcp gen_rrr_opcode(DisasContext *dc, unsigned opext,
437 unsigned dest, unsigned srca, unsigned srcb)
439 TCGv tdest = dest_gr(dc, dest);
440 TCGv tsrca = load_gr(dc, srca);
441 TCGv tsrcb = load_gr(dc, srcb);
442 const char *mnemonic;
444 switch (opext) {
445 case OE_RRR(ADDXSC, 0, X0):
446 case OE_RRR(ADDXSC, 0, X1):
447 gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_add_tl);
448 mnemonic = "addxsc";
449 break;
450 case OE_RRR(ADDX, 0, X0):
451 case OE_RRR(ADDX, 0, X1):
452 case OE_RRR(ADDX, 0, Y0):
453 case OE_RRR(ADDX, 0, Y1):
454 tcg_gen_add_tl(tdest, tsrca, tsrcb);
455 tcg_gen_ext32s_tl(tdest, tdest);
456 mnemonic = "addx";
457 break;
458 case OE_RRR(ADD, 0, X0):
459 case OE_RRR(ADD, 0, X1):
460 case OE_RRR(ADD, 0, Y0):
461 case OE_RRR(ADD, 0, Y1):
462 tcg_gen_add_tl(tdest, tsrca, tsrcb);
463 mnemonic = "add";
464 break;
465 case OE_RRR(AND, 0, X0):
466 case OE_RRR(AND, 0, X1):
467 case OE_RRR(AND, 5, Y0):
468 case OE_RRR(AND, 5, Y1):
469 tcg_gen_and_tl(tdest, tsrca, tsrcb);
470 mnemonic = "and";
471 break;
472 case OE_RRR(CMOVEQZ, 0, X0):
473 case OE_RRR(CMOVEQZ, 4, Y0):
474 case OE_RRR(CMOVNEZ, 0, X0):
475 case OE_RRR(CMOVNEZ, 4, Y0):
476 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
477 case OE_RRR(CMPEQ, 0, X0):
478 case OE_RRR(CMPEQ, 0, X1):
479 case OE_RRR(CMPEQ, 3, Y0):
480 case OE_RRR(CMPEQ, 3, Y1):
481 tcg_gen_setcond_tl(TCG_COND_EQ, tdest, tsrca, tsrcb);
482 mnemonic = "cmpeq";
483 break;
484 case OE_RRR(CMPEXCH4, 0, X1):
485 case OE_RRR(CMPEXCH, 0, X1):
486 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
487 case OE_RRR(CMPLES, 0, X0):
488 case OE_RRR(CMPLES, 0, X1):
489 case OE_RRR(CMPLES, 2, Y0):
490 case OE_RRR(CMPLES, 2, Y1):
491 tcg_gen_setcond_tl(TCG_COND_LE, tdest, tsrca, tsrcb);
492 mnemonic = "cmples";
493 break;
494 case OE_RRR(CMPLEU, 0, X0):
495 case OE_RRR(CMPLEU, 0, X1):
496 case OE_RRR(CMPLEU, 2, Y0):
497 case OE_RRR(CMPLEU, 2, Y1):
498 tcg_gen_setcond_tl(TCG_COND_LEU, tdest, tsrca, tsrcb);
499 mnemonic = "cmpleu";
500 break;
501 case OE_RRR(CMPLTS, 0, X0):
502 case OE_RRR(CMPLTS, 0, X1):
503 case OE_RRR(CMPLTS, 2, Y0):
504 case OE_RRR(CMPLTS, 2, Y1):
505 tcg_gen_setcond_tl(TCG_COND_LT, tdest, tsrca, tsrcb);
506 mnemonic = "cmplts";
507 break;
508 case OE_RRR(CMPLTU, 0, X0):
509 case OE_RRR(CMPLTU, 0, X1):
510 case OE_RRR(CMPLTU, 2, Y0):
511 case OE_RRR(CMPLTU, 2, Y1):
512 tcg_gen_setcond_tl(TCG_COND_LTU, tdest, tsrca, tsrcb);
513 mnemonic = "cmpltu";
514 break;
515 case OE_RRR(CMPNE, 0, X0):
516 case OE_RRR(CMPNE, 0, X1):
517 case OE_RRR(CMPNE, 3, Y0):
518 case OE_RRR(CMPNE, 3, Y1):
519 tcg_gen_setcond_tl(TCG_COND_NE, tdest, tsrca, tsrcb);
520 mnemonic = "cmpne";
521 break;
522 case OE_RRR(CMULAF, 0, X0):
523 case OE_RRR(CMULA, 0, X0):
524 case OE_RRR(CMULFR, 0, X0):
525 case OE_RRR(CMULF, 0, X0):
526 case OE_RRR(CMULHR, 0, X0):
527 case OE_RRR(CMULH, 0, X0):
528 case OE_RRR(CMUL, 0, X0):
529 case OE_RRR(CRC32_32, 0, X0):
530 case OE_RRR(CRC32_8, 0, X0):
531 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
532 case OE_RRR(DBLALIGN2, 0, X0):
533 case OE_RRR(DBLALIGN2, 0, X1):
534 gen_dblaligni(tdest, tsrca, tsrcb, 16);
535 mnemonic = "dblalign2";
536 break;
537 case OE_RRR(DBLALIGN4, 0, X0):
538 case OE_RRR(DBLALIGN4, 0, X1):
539 gen_dblaligni(tdest, tsrca, tsrcb, 32);
540 mnemonic = "dblalign4";
541 break;
542 case OE_RRR(DBLALIGN6, 0, X0):
543 case OE_RRR(DBLALIGN6, 0, X1):
544 gen_dblaligni(tdest, tsrca, tsrcb, 48);
545 mnemonic = "dblalign6";
546 break;
547 case OE_RRR(DBLALIGN, 0, X0):
548 gen_dblalign(tdest, load_gr(dc, dest), tsrca, tsrcb);
549 mnemonic = "dblalign";
550 break;
551 case OE_RRR(EXCH4, 0, X1):
552 case OE_RRR(EXCH, 0, X1):
553 case OE_RRR(FDOUBLE_ADDSUB, 0, X0):
554 case OE_RRR(FDOUBLE_ADD_FLAGS, 0, X0):
555 case OE_RRR(FDOUBLE_MUL_FLAGS, 0, X0):
556 case OE_RRR(FDOUBLE_PACK1, 0, X0):
557 case OE_RRR(FDOUBLE_PACK2, 0, X0):
558 case OE_RRR(FDOUBLE_SUB_FLAGS, 0, X0):
559 case OE_RRR(FDOUBLE_UNPACK_MAX, 0, X0):
560 case OE_RRR(FDOUBLE_UNPACK_MIN, 0, X0):
561 case OE_RRR(FETCHADD4, 0, X1):
562 case OE_RRR(FETCHADDGEZ4, 0, X1):
563 case OE_RRR(FETCHADDGEZ, 0, X1):
564 case OE_RRR(FETCHADD, 0, X1):
565 case OE_RRR(FETCHAND4, 0, X1):
566 case OE_RRR(FETCHAND, 0, X1):
567 case OE_RRR(FETCHOR4, 0, X1):
568 case OE_RRR(FETCHOR, 0, X1):
569 case OE_RRR(FSINGLE_ADD1, 0, X0):
570 case OE_RRR(FSINGLE_ADDSUB2, 0, X0):
571 case OE_RRR(FSINGLE_MUL1, 0, X0):
572 case OE_RRR(FSINGLE_MUL2, 0, X0):
573 case OE_RRR(FSINGLE_PACK2, 0, X0):
574 case OE_RRR(FSINGLE_SUB1, 0, X0):
575 case OE_RRR(MNZ, 0, X0):
576 case OE_RRR(MNZ, 0, X1):
577 case OE_RRR(MNZ, 4, Y0):
578 case OE_RRR(MNZ, 4, Y1):
579 case OE_RRR(MULAX, 0, X0):
580 case OE_RRR(MULAX, 3, Y0):
581 case OE_RRR(MULA_HS_HS, 0, X0):
582 case OE_RRR(MULA_HS_HS, 9, Y0):
583 case OE_RRR(MULA_HS_HU, 0, X0):
584 case OE_RRR(MULA_HS_LS, 0, X0):
585 case OE_RRR(MULA_HS_LU, 0, X0):
586 case OE_RRR(MULA_HU_HU, 0, X0):
587 case OE_RRR(MULA_HU_HU, 9, Y0):
588 case OE_RRR(MULA_HU_LS, 0, X0):
589 case OE_RRR(MULA_HU_LU, 0, X0):
590 case OE_RRR(MULA_LS_LS, 0, X0):
591 case OE_RRR(MULA_LS_LS, 9, Y0):
592 case OE_RRR(MULA_LS_LU, 0, X0):
593 case OE_RRR(MULA_LU_LU, 0, X0):
594 case OE_RRR(MULA_LU_LU, 9, Y0):
595 case OE_RRR(MULX, 0, X0):
596 case OE_RRR(MULX, 3, Y0):
597 case OE_RRR(MUL_HS_HS, 0, X0):
598 case OE_RRR(MUL_HS_HS, 8, Y0):
599 case OE_RRR(MUL_HS_HU, 0, X0):
600 case OE_RRR(MUL_HS_LS, 0, X0):
601 case OE_RRR(MUL_HS_LU, 0, X0):
602 case OE_RRR(MUL_HU_HU, 0, X0):
603 case OE_RRR(MUL_HU_HU, 8, Y0):
604 case OE_RRR(MUL_HU_LS, 0, X0):
605 case OE_RRR(MUL_HU_LU, 0, X0):
606 case OE_RRR(MUL_LS_LS, 0, X0):
607 case OE_RRR(MUL_LS_LS, 8, Y0):
608 case OE_RRR(MUL_LS_LU, 0, X0):
609 case OE_RRR(MUL_LU_LU, 0, X0):
610 case OE_RRR(MUL_LU_LU, 8, Y0):
611 case OE_RRR(MZ, 0, X0):
612 case OE_RRR(MZ, 0, X1):
613 case OE_RRR(MZ, 4, Y0):
614 case OE_RRR(MZ, 4, Y1):
615 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
616 case OE_RRR(NOR, 0, X0):
617 case OE_RRR(NOR, 0, X1):
618 case OE_RRR(NOR, 5, Y0):
619 case OE_RRR(NOR, 5, Y1):
620 tcg_gen_nor_tl(tdest, tsrca, tsrcb);
621 mnemonic = "nor";
622 break;
623 case OE_RRR(OR, 0, X0):
624 case OE_RRR(OR, 0, X1):
625 case OE_RRR(OR, 5, Y0):
626 case OE_RRR(OR, 5, Y1):
627 tcg_gen_or_tl(tdest, tsrca, tsrcb);
628 mnemonic = "or";
629 break;
630 case OE_RRR(ROTL, 0, X0):
631 case OE_RRR(ROTL, 0, X1):
632 case OE_RRR(ROTL, 6, Y0):
633 case OE_RRR(ROTL, 6, Y1):
634 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
635 case OE_RRR(SHL1ADDX, 0, X0):
636 case OE_RRR(SHL1ADDX, 0, X1):
637 case OE_RRR(SHL1ADDX, 7, Y0):
638 case OE_RRR(SHL1ADDX, 7, Y1):
639 tcg_gen_shli_tl(tdest, tsrca, 1);
640 tcg_gen_add_tl(tdest, tdest, tsrcb);
641 tcg_gen_ext32s_tl(tdest, tdest);
642 mnemonic = "shl1addx";
643 break;
644 case OE_RRR(SHL1ADD, 0, X0):
645 case OE_RRR(SHL1ADD, 0, X1):
646 case OE_RRR(SHL1ADD, 1, Y0):
647 case OE_RRR(SHL1ADD, 1, Y1):
648 tcg_gen_shli_tl(tdest, tsrca, 1);
649 tcg_gen_add_tl(tdest, tdest, tsrcb);
650 mnemonic = "shl1add";
651 break;
652 case OE_RRR(SHL2ADDX, 0, X0):
653 case OE_RRR(SHL2ADDX, 0, X1):
654 case OE_RRR(SHL2ADDX, 7, Y0):
655 case OE_RRR(SHL2ADDX, 7, Y1):
656 tcg_gen_shli_tl(tdest, tsrca, 2);
657 tcg_gen_add_tl(tdest, tdest, tsrcb);
658 tcg_gen_ext32s_tl(tdest, tdest);
659 mnemonic = "shl2addx";
660 break;
661 case OE_RRR(SHL2ADD, 0, X0):
662 case OE_RRR(SHL2ADD, 0, X1):
663 case OE_RRR(SHL2ADD, 1, Y0):
664 case OE_RRR(SHL2ADD, 1, Y1):
665 tcg_gen_shli_tl(tdest, tsrca, 2);
666 tcg_gen_add_tl(tdest, tdest, tsrcb);
667 mnemonic = "shl2add";
668 break;
669 case OE_RRR(SHL3ADDX, 0, X0):
670 case OE_RRR(SHL3ADDX, 0, X1):
671 case OE_RRR(SHL3ADDX, 7, Y0):
672 case OE_RRR(SHL3ADDX, 7, Y1):
673 tcg_gen_shli_tl(tdest, tsrca, 3);
674 tcg_gen_add_tl(tdest, tdest, tsrcb);
675 tcg_gen_ext32s_tl(tdest, tdest);
676 mnemonic = "shl3addx";
677 break;
678 case OE_RRR(SHL3ADD, 0, X0):
679 case OE_RRR(SHL3ADD, 0, X1):
680 case OE_RRR(SHL3ADD, 1, Y0):
681 case OE_RRR(SHL3ADD, 1, Y1):
682 tcg_gen_shli_tl(tdest, tsrca, 3);
683 tcg_gen_add_tl(tdest, tdest, tsrcb);
684 mnemonic = "shl3add";
685 break;
686 case OE_RRR(SHLX, 0, X0):
687 case OE_RRR(SHLX, 0, X1):
688 case OE_RRR(SHL, 0, X0):
689 case OE_RRR(SHL, 0, X1):
690 case OE_RRR(SHL, 6, Y0):
691 case OE_RRR(SHL, 6, Y1):
692 case OE_RRR(SHRS, 0, X0):
693 case OE_RRR(SHRS, 0, X1):
694 case OE_RRR(SHRS, 6, Y0):
695 case OE_RRR(SHRS, 6, Y1):
696 case OE_RRR(SHRUX, 0, X0):
697 case OE_RRR(SHRUX, 0, X1):
698 case OE_RRR(SHRU, 0, X0):
699 case OE_RRR(SHRU, 0, X1):
700 case OE_RRR(SHRU, 6, Y0):
701 case OE_RRR(SHRU, 6, Y1):
702 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
703 case OE_RRR(SHUFFLEBYTES, 0, X0):
704 gen_helper_shufflebytes(tdest, load_gr(dc, dest), tsrca, tsrca);
705 mnemonic = "shufflebytes";
706 break;
707 case OE_RRR(SUBXSC, 0, X0):
708 case OE_RRR(SUBXSC, 0, X1):
709 gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_sub_tl);
710 mnemonic = "subxsc";
711 break;
712 case OE_RRR(SUBX, 0, X0):
713 case OE_RRR(SUBX, 0, X1):
714 case OE_RRR(SUBX, 0, Y0):
715 case OE_RRR(SUBX, 0, Y1):
716 tcg_gen_sub_tl(tdest, tsrca, tsrcb);
717 tcg_gen_ext32s_tl(tdest, tdest);
718 mnemonic = "subx";
719 break;
720 case OE_RRR(SUB, 0, X0):
721 case OE_RRR(SUB, 0, X1):
722 case OE_RRR(SUB, 0, Y0):
723 case OE_RRR(SUB, 0, Y1):
724 tcg_gen_sub_tl(tdest, tsrca, tsrcb);
725 mnemonic = "sub";
726 break;
727 case OE_RRR(V1ADDUC, 0, X0):
728 case OE_RRR(V1ADDUC, 0, X1):
729 case OE_RRR(V1ADD, 0, X0):
730 case OE_RRR(V1ADD, 0, X1):
731 case OE_RRR(V1ADIFFU, 0, X0):
732 case OE_RRR(V1AVGU, 0, X0):
733 case OE_RRR(V1CMPEQ, 0, X0):
734 case OE_RRR(V1CMPEQ, 0, X1):
735 case OE_RRR(V1CMPLES, 0, X0):
736 case OE_RRR(V1CMPLES, 0, X1):
737 case OE_RRR(V1CMPLEU, 0, X0):
738 case OE_RRR(V1CMPLEU, 0, X1):
739 case OE_RRR(V1CMPLTS, 0, X0):
740 case OE_RRR(V1CMPLTS, 0, X1):
741 case OE_RRR(V1CMPLTU, 0, X0):
742 case OE_RRR(V1CMPLTU, 0, X1):
743 case OE_RRR(V1CMPNE, 0, X0):
744 case OE_RRR(V1CMPNE, 0, X1):
745 case OE_RRR(V1DDOTPUA, 0, X0):
746 case OE_RRR(V1DDOTPUSA, 0, X0):
747 case OE_RRR(V1DDOTPUS, 0, X0):
748 case OE_RRR(V1DDOTPU, 0, X0):
749 case OE_RRR(V1DOTPA, 0, X0):
750 case OE_RRR(V1DOTPUA, 0, X0):
751 case OE_RRR(V1DOTPUSA, 0, X0):
752 case OE_RRR(V1DOTPUS, 0, X0):
753 case OE_RRR(V1DOTPU, 0, X0):
754 case OE_RRR(V1DOTP, 0, X0):
755 case OE_RRR(V1INT_H, 0, X0):
756 case OE_RRR(V1INT_H, 0, X1):
757 case OE_RRR(V1INT_L, 0, X0):
758 case OE_RRR(V1INT_L, 0, X1):
759 case OE_RRR(V1MAXU, 0, X0):
760 case OE_RRR(V1MAXU, 0, X1):
761 case OE_RRR(V1MINU, 0, X0):
762 case OE_RRR(V1MINU, 0, X1):
763 case OE_RRR(V1MNZ, 0, X0):
764 case OE_RRR(V1MNZ, 0, X1):
765 case OE_RRR(V1MULTU, 0, X0):
766 case OE_RRR(V1MULUS, 0, X0):
767 case OE_RRR(V1MULU, 0, X0):
768 case OE_RRR(V1MZ, 0, X0):
769 case OE_RRR(V1MZ, 0, X1):
770 case OE_RRR(V1SADAU, 0, X0):
771 case OE_RRR(V1SADU, 0, X0):
772 case OE_RRR(V1SHL, 0, X0):
773 case OE_RRR(V1SHL, 0, X1):
774 case OE_RRR(V1SHRS, 0, X0):
775 case OE_RRR(V1SHRS, 0, X1):
776 case OE_RRR(V1SHRU, 0, X0):
777 case OE_RRR(V1SHRU, 0, X1):
778 case OE_RRR(V1SUBUC, 0, X0):
779 case OE_RRR(V1SUBUC, 0, X1):
780 case OE_RRR(V1SUB, 0, X0):
781 case OE_RRR(V1SUB, 0, X1):
782 case OE_RRR(V2ADDSC, 0, X0):
783 case OE_RRR(V2ADDSC, 0, X1):
784 case OE_RRR(V2ADD, 0, X0):
785 case OE_RRR(V2ADD, 0, X1):
786 case OE_RRR(V2ADIFFS, 0, X0):
787 case OE_RRR(V2AVGS, 0, X0):
788 case OE_RRR(V2CMPEQ, 0, X0):
789 case OE_RRR(V2CMPEQ, 0, X1):
790 case OE_RRR(V2CMPLES, 0, X0):
791 case OE_RRR(V2CMPLES, 0, X1):
792 case OE_RRR(V2CMPLEU, 0, X0):
793 case OE_RRR(V2CMPLEU, 0, X1):
794 case OE_RRR(V2CMPLTS, 0, X0):
795 case OE_RRR(V2CMPLTS, 0, X1):
796 case OE_RRR(V2CMPLTU, 0, X0):
797 case OE_RRR(V2CMPLTU, 0, X1):
798 case OE_RRR(V2CMPNE, 0, X0):
799 case OE_RRR(V2CMPNE, 0, X1):
800 case OE_RRR(V2DOTPA, 0, X0):
801 case OE_RRR(V2DOTP, 0, X0):
802 case OE_RRR(V2INT_H, 0, X0):
803 case OE_RRR(V2INT_H, 0, X1):
804 case OE_RRR(V2INT_L, 0, X0):
805 case OE_RRR(V2INT_L, 0, X1):
806 case OE_RRR(V2MAXS, 0, X0):
807 case OE_RRR(V2MAXS, 0, X1):
808 case OE_RRR(V2MINS, 0, X0):
809 case OE_RRR(V2MINS, 0, X1):
810 case OE_RRR(V2MNZ, 0, X0):
811 case OE_RRR(V2MNZ, 0, X1):
812 case OE_RRR(V2MULFSC, 0, X0):
813 case OE_RRR(V2MULS, 0, X0):
814 case OE_RRR(V2MULTS, 0, X0):
815 case OE_RRR(V2MZ, 0, X0):
816 case OE_RRR(V2MZ, 0, X1):
817 case OE_RRR(V2PACKH, 0, X0):
818 case OE_RRR(V2PACKH, 0, X1):
819 case OE_RRR(V2PACKL, 0, X0):
820 case OE_RRR(V2PACKL, 0, X1):
821 case OE_RRR(V2PACKUC, 0, X0):
822 case OE_RRR(V2PACKUC, 0, X1):
823 case OE_RRR(V2SADAS, 0, X0):
824 case OE_RRR(V2SADAU, 0, X0):
825 case OE_RRR(V2SADS, 0, X0):
826 case OE_RRR(V2SADU, 0, X0):
827 case OE_RRR(V2SHLSC, 0, X0):
828 case OE_RRR(V2SHLSC, 0, X1):
829 case OE_RRR(V2SHL, 0, X0):
830 case OE_RRR(V2SHL, 0, X1):
831 case OE_RRR(V2SHRS, 0, X0):
832 case OE_RRR(V2SHRS, 0, X1):
833 case OE_RRR(V2SHRU, 0, X0):
834 case OE_RRR(V2SHRU, 0, X1):
835 case OE_RRR(V2SUBSC, 0, X0):
836 case OE_RRR(V2SUBSC, 0, X1):
837 case OE_RRR(V2SUB, 0, X0):
838 case OE_RRR(V2SUB, 0, X1):
839 case OE_RRR(V4ADDSC, 0, X0):
840 case OE_RRR(V4ADDSC, 0, X1):
841 case OE_RRR(V4ADD, 0, X0):
842 case OE_RRR(V4ADD, 0, X1):
843 case OE_RRR(V4INT_H, 0, X0):
844 case OE_RRR(V4INT_H, 0, X1):
845 case OE_RRR(V4INT_L, 0, X0):
846 case OE_RRR(V4INT_L, 0, X1):
847 case OE_RRR(V4PACKSC, 0, X0):
848 case OE_RRR(V4PACKSC, 0, X1):
849 case OE_RRR(V4SHLSC, 0, X0):
850 case OE_RRR(V4SHLSC, 0, X1):
851 case OE_RRR(V4SHL, 0, X0):
852 case OE_RRR(V4SHL, 0, X1):
853 case OE_RRR(V4SHRS, 0, X0):
854 case OE_RRR(V4SHRS, 0, X1):
855 case OE_RRR(V4SHRU, 0, X0):
856 case OE_RRR(V4SHRU, 0, X1):
857 case OE_RRR(V4SUBSC, 0, X0):
858 case OE_RRR(V4SUBSC, 0, X1):
859 case OE_RRR(V4SUB, 0, X0):
860 case OE_RRR(V4SUB, 0, X1):
861 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
862 case OE_RRR(XOR, 0, X0):
863 case OE_RRR(XOR, 0, X1):
864 case OE_RRR(XOR, 5, Y0):
865 case OE_RRR(XOR, 5, Y1):
866 tcg_gen_xor_tl(tdest, tsrca, tsrcb);
867 mnemonic = "xor";
868 break;
869 default:
870 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
873 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %s", mnemonic,
874 reg_names[dest], reg_names[srca], reg_names[srcb]);
875 return TILEGX_EXCP_NONE;
878 static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext,
879 unsigned dest, unsigned srca, int imm)
881 TCGv tdest = dest_gr(dc, dest);
882 TCGv tsrca = load_gr(dc, srca);
883 const char *mnemonic;
884 TCGMemOp memop;
886 switch (opext) {
887 case OE(ADDI_OPCODE_Y0, 0, Y0):
888 case OE(ADDI_OPCODE_Y1, 0, Y1):
889 case OE_IM(ADDI, X0):
890 case OE_IM(ADDI, X1):
891 tcg_gen_addi_tl(tdest, tsrca, imm);
892 mnemonic = "addi";
893 break;
894 case OE(ADDXI_OPCODE_Y0, 0, Y0):
895 case OE(ADDXI_OPCODE_Y1, 0, Y1):
896 case OE_IM(ADDXI, X0):
897 case OE_IM(ADDXI, X1):
898 tcg_gen_addi_tl(tdest, tsrca, imm);
899 tcg_gen_ext32s_tl(tdest, tdest);
900 mnemonic = "addxi";
901 break;
902 case OE(ANDI_OPCODE_Y0, 0, Y0):
903 case OE(ANDI_OPCODE_Y1, 0, Y1):
904 case OE_IM(ANDI, X0):
905 case OE_IM(ANDI, X1):
906 tcg_gen_andi_tl(tdest, tsrca, imm);
907 mnemonic = "andi";
908 break;
909 case OE(CMPEQI_OPCODE_Y0, 0, Y0):
910 case OE(CMPEQI_OPCODE_Y1, 0, Y1):
911 case OE_IM(CMPEQI, X0):
912 case OE_IM(CMPEQI, X1):
913 tcg_gen_setcondi_tl(TCG_COND_EQ, tdest, tsrca, imm);
914 mnemonic = "cmpeqi";
915 break;
916 case OE(CMPLTSI_OPCODE_Y0, 0, Y0):
917 case OE(CMPLTSI_OPCODE_Y1, 0, Y1):
918 case OE_IM(CMPLTSI, X0):
919 case OE_IM(CMPLTSI, X1):
920 tcg_gen_setcondi_tl(TCG_COND_LT, tdest, tsrca, imm);
921 mnemonic = "cmpltsi";
922 break;
923 case OE_IM(CMPLTUI, X0):
924 case OE_IM(CMPLTUI, X1):
925 tcg_gen_setcondi_tl(TCG_COND_LTU, tdest, tsrca, imm);
926 mnemonic = "cmpltui";
927 break;
928 case OE_IM(LD1S_ADD, X1):
929 memop = MO_SB;
930 mnemonic = "ld1s_add";
931 goto do_load_add;
932 case OE_IM(LD1U_ADD, X1):
933 memop = MO_UB;
934 mnemonic = "ld1u_add";
935 goto do_load_add;
936 case OE_IM(LD2S_ADD, X1):
937 memop = MO_TESW;
938 mnemonic = "ld2s_add";
939 goto do_load_add;
940 case OE_IM(LD2U_ADD, X1):
941 memop = MO_TEUW;
942 mnemonic = "ld2u_add";
943 goto do_load_add;
944 case OE_IM(LD4S_ADD, X1):
945 memop = MO_TESL;
946 mnemonic = "ld4s_add";
947 goto do_load_add;
948 case OE_IM(LD4U_ADD, X1):
949 memop = MO_TEUL;
950 mnemonic = "ld4u_add";
951 goto do_load_add;
952 case OE_IM(LDNT1S_ADD, X1):
953 memop = MO_SB;
954 mnemonic = "ldnt1s_add";
955 goto do_load_add;
956 case OE_IM(LDNT1U_ADD, X1):
957 memop = MO_UB;
958 mnemonic = "ldnt1u_add";
959 goto do_load_add;
960 case OE_IM(LDNT2S_ADD, X1):
961 memop = MO_TESW;
962 mnemonic = "ldnt2s_add";
963 goto do_load_add;
964 case OE_IM(LDNT2U_ADD, X1):
965 memop = MO_TEUW;
966 mnemonic = "ldnt2u_add";
967 goto do_load_add;
968 case OE_IM(LDNT4S_ADD, X1):
969 memop = MO_TESL;
970 mnemonic = "ldnt4s_add";
971 goto do_load_add;
972 case OE_IM(LDNT4U_ADD, X1):
973 memop = MO_TEUL;
974 mnemonic = "ldnt4u_add";
975 goto do_load_add;
976 case OE_IM(LDNT_ADD, X1):
977 memop = MO_TEQ;
978 mnemonic = "ldnt_add";
979 goto do_load_add;
980 case OE_IM(LD_ADD, X1):
981 memop = MO_TEQ;
982 mnemonic = "ldnt_add";
983 do_load_add:
984 tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
985 tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
986 break;
987 case OE_IM(LDNA_ADD, X1):
988 tcg_gen_andi_tl(tdest, tsrca, ~7);
989 tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
990 tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
991 mnemonic = "ldna_add";
992 break;
993 case OE_IM(MFSPR, X1):
994 case OE_IM(MTSPR, X1):
995 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
996 case OE_IM(ORI, X0):
997 case OE_IM(ORI, X1):
998 tcg_gen_ori_tl(tdest, tsrca, imm);
999 mnemonic = "ori";
1000 break;
1001 case OE_IM(V1ADDI, X0):
1002 case OE_IM(V1ADDI, X1):
1003 case OE_IM(V1CMPEQI, X0):
1004 case OE_IM(V1CMPEQI, X1):
1005 case OE_IM(V1CMPLTSI, X0):
1006 case OE_IM(V1CMPLTSI, X1):
1007 case OE_IM(V1CMPLTUI, X0):
1008 case OE_IM(V1CMPLTUI, X1):
1009 case OE_IM(V1MAXUI, X0):
1010 case OE_IM(V1MAXUI, X1):
1011 case OE_IM(V1MINUI, X0):
1012 case OE_IM(V1MINUI, X1):
1013 case OE_IM(V2ADDI, X0):
1014 case OE_IM(V2ADDI, X1):
1015 case OE_IM(V2CMPEQI, X0):
1016 case OE_IM(V2CMPEQI, X1):
1017 case OE_IM(V2CMPLTSI, X0):
1018 case OE_IM(V2CMPLTSI, X1):
1019 case OE_IM(V2CMPLTUI, X0):
1020 case OE_IM(V2CMPLTUI, X1):
1021 case OE_IM(V2MAXSI, X0):
1022 case OE_IM(V2MAXSI, X1):
1023 case OE_IM(V2MINSI, X0):
1024 case OE_IM(V2MINSI, X1):
1025 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1026 case OE_IM(XORI, X0):
1027 case OE_IM(XORI, X1):
1028 tcg_gen_xori_tl(tdest, tsrca, imm);
1029 mnemonic = "xori";
1030 break;
1032 case OE_SH(ROTLI, X0):
1033 case OE_SH(ROTLI, X1):
1034 case OE_SH(ROTLI, Y0):
1035 case OE_SH(ROTLI, Y1):
1036 case OE_SH(SHLI, X0):
1037 case OE_SH(SHLI, X1):
1038 case OE_SH(SHLI, Y0):
1039 case OE_SH(SHLI, Y1):
1040 case OE_SH(SHLXI, X0):
1041 case OE_SH(SHLXI, X1):
1042 case OE_SH(SHRSI, X0):
1043 case OE_SH(SHRSI, X1):
1044 case OE_SH(SHRSI, Y0):
1045 case OE_SH(SHRSI, Y1):
1046 case OE_SH(SHRUI, X0):
1047 case OE_SH(SHRUI, X1):
1048 case OE_SH(SHRUI, Y0):
1049 case OE_SH(SHRUI, Y1):
1050 case OE_SH(SHRUXI, X0):
1051 case OE_SH(SHRUXI, X1):
1052 case OE_SH(V1SHLI, X0):
1053 case OE_SH(V1SHLI, X1):
1054 case OE_SH(V1SHRSI, X0):
1055 case OE_SH(V1SHRSI, X1):
1056 case OE_SH(V1SHRUI, X0):
1057 case OE_SH(V1SHRUI, X1):
1058 case OE_SH(V2SHLI, X0):
1059 case OE_SH(V2SHLI, X1):
1060 case OE_SH(V2SHRSI, X0):
1061 case OE_SH(V2SHRSI, X1):
1062 case OE_SH(V2SHRUI, X0):
1063 case OE_SH(V2SHRUI, X1):
1064 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1066 case OE(ADDLI_OPCODE_X0, 0, X0):
1067 case OE(ADDLI_OPCODE_X1, 0, X1):
1068 tcg_gen_addi_tl(tdest, tsrca, imm);
1069 mnemonic = "addli";
1070 break;
1071 case OE(ADDXLI_OPCODE_X0, 0, X0):
1072 case OE(ADDXLI_OPCODE_X1, 0, X1):
1073 tcg_gen_addi_tl(tdest, tsrca, imm);
1074 tcg_gen_ext32s_tl(tdest, tdest);
1075 mnemonic = "addxli";
1076 break;
1077 case OE(SHL16INSLI_OPCODE_X0, 0, X0):
1078 case OE(SHL16INSLI_OPCODE_X1, 0, X1):
1079 tcg_gen_shli_tl(tdest, tsrca, 16);
1080 tcg_gen_ori_tl(tdest, tdest, imm & 0xffff);
1081 mnemonic = "shl16insli";
1082 break;
1084 default:
1085 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1088 qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", mnemonic,
1089 reg_names[dest], reg_names[srca], imm);
1090 return TILEGX_EXCP_NONE;
/*
 * Handle the X0-pipe bitfield opcodes (bfextu/bfexts/bfins/mm).
 * None of these are implemented yet: every case, including default,
 * raises TILEGX_EXCP_OPCODE_UNIMPLEMENTED.  The disassembly log at
 * the bottom is only reachable once a case breaks out of the switch,
 * which is why "mnemonic" is currently never read.
 */
static TileExcp gen_bf_opcode_x0(DisasContext *dc, unsigned ext,
                                 unsigned dest, unsigned srca,
                                 unsigned bfs, unsigned bfe)
{
    const char *mnemonic;

    switch (ext) {
    case BFEXTU_BF_OPCODE_X0:
    case BFEXTS_BF_OPCODE_X0:
    case BFINS_BF_OPCODE_X0:
    case MM_BF_OPCODE_X0:
    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %u, %u", mnemonic,
                  reg_names[dest], reg_names[srca], bfs, bfe);
    return TILEGX_EXCP_NONE;
}
1113 static TileExcp gen_branch_opcode_x1(DisasContext *dc, unsigned ext,
1114 unsigned srca, int off)
1116 target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
1117 const char *mnemonic;
1119 dc->jmp.dest = tcg_const_tl(tgt);
1120 dc->jmp.val1 = tcg_temp_new();
1121 tcg_gen_mov_tl(dc->jmp.val1, load_gr(dc, srca));
1123 /* Note that the "predict taken" opcodes have bit 0 clear.
1124 Therefore, fold the two cases together by setting bit 0. */
1125 switch (ext | 1) {
1126 case BEQZ_BRANCH_OPCODE_X1:
1127 dc->jmp.cond = TCG_COND_EQ;
1128 mnemonic = "beqz";
1129 break;
1130 case BNEZ_BRANCH_OPCODE_X1:
1131 dc->jmp.cond = TCG_COND_NE;
1132 mnemonic = "bnez";
1133 break;
1134 case BGEZ_BRANCH_OPCODE_X1:
1135 dc->jmp.cond = TCG_COND_GE;
1136 mnemonic = "bgez";
1137 break;
1138 case BGTZ_BRANCH_OPCODE_X1:
1139 dc->jmp.cond = TCG_COND_GT;
1140 mnemonic = "bgtz";
1141 break;
1142 case BLEZ_BRANCH_OPCODE_X1:
1143 dc->jmp.cond = TCG_COND_LE;
1144 mnemonic = "blez";
1145 break;
1146 case BLTZ_BRANCH_OPCODE_X1:
1147 dc->jmp.cond = TCG_COND_LT;
1148 mnemonic = "bltz";
1149 break;
1150 case BLBC_BRANCH_OPCODE_X1:
1151 dc->jmp.cond = TCG_COND_EQ;
1152 tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
1153 mnemonic = "blbc";
1154 break;
1155 case BLBS_BRANCH_OPCODE_X1:
1156 dc->jmp.cond = TCG_COND_NE;
1157 tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
1158 mnemonic = "blbs";
1159 break;
1160 default:
1161 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
1164 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1165 qemu_log("%s%s %s, " TARGET_FMT_lx " <%s>",
1166 mnemonic, ext & 1 ? "" : "t",
1167 reg_names[srca], tgt, lookup_symbol(tgt));
1169 return TILEGX_EXCP_NONE;
1172 static TileExcp gen_jump_opcode_x1(DisasContext *dc, unsigned ext, int off)
1174 target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
1175 const char *mnemonic = "j";
1177 /* The extension field is 1 bit, therefore we only have JAL and J. */
1178 if (ext == JAL_JUMP_OPCODE_X1) {
1179 tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
1180 dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
1181 mnemonic = "jal";
1183 dc->jmp.cond = TCG_COND_ALWAYS;
1184 dc->jmp.dest = tcg_const_tl(tgt);
1186 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1187 qemu_log("%s " TARGET_FMT_lx " <%s>",
1188 mnemonic, tgt, lookup_symbol(tgt));
1190 return TILEGX_EXCP_NONE;
/*
 * Decode the Y0 pipe of a Y-mode bundle and dispatch to the matching
 * RR/RRR/RRI generator.  Returns the generator's exception status, or
 * TILEGX_EXCP_OPCODE_UNIMPLEMENTED for unknown major opcodes.
 */
static TileExcp decode_y0(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_Y0(bundle);
    unsigned ext = get_RRROpcodeExtension_Y0(bundle);
    unsigned dest = get_Dest_Y0(bundle);
    unsigned srca = get_SrcA_Y0(bundle);
    unsigned srcb;
    int imm;

    switch (opc) {
    case RRR_1_OPCODE_Y0:
        /* RRR_1 with the unary extension encodes two-operand insns;
           any other extension falls through to generic RRR handling. */
        if (ext == UNARY_RRR_1_OPCODE_Y0) {
            ext = get_UnaryOpcodeExtension_Y0(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, Y0), dest, srca);
        }
        /* fallthru */
    case RRR_0_OPCODE_Y0:
    case RRR_2_OPCODE_Y0:
    case RRR_3_OPCODE_Y0:
    case RRR_4_OPCODE_Y0:
    case RRR_5_OPCODE_Y0:
    case RRR_6_OPCODE_Y0:
    case RRR_7_OPCODE_Y0:
    case RRR_8_OPCODE_Y0:
    case RRR_9_OPCODE_Y0:
        srcb = get_SrcB_Y0(bundle);
        return gen_rrr_opcode(dc, OE(opc, ext, Y0), dest, srca, srcb);

    case SHIFT_OPCODE_Y0:
        ext = get_ShiftOpcodeExtension_Y0(bundle);
        imm = get_ShAmt_Y0(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, Y0), dest, srca, imm);

    case ADDI_OPCODE_Y0:
    case ADDXI_OPCODE_Y0:
    case ANDI_OPCODE_Y0:
    case CMPEQI_OPCODE_Y0:
    case CMPLTSI_OPCODE_Y0:
        /* 8-bit immediate, sign-extended. */
        imm = (int8_t)get_Imm8_Y0(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, Y0), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
1239 static TileExcp decode_y1(DisasContext *dc, tilegx_bundle_bits bundle)
1241 unsigned opc = get_Opcode_Y1(bundle);
1242 unsigned ext = get_RRROpcodeExtension_Y1(bundle);
1243 unsigned dest = get_Dest_Y1(bundle);
1244 unsigned srca = get_SrcA_Y1(bundle);
1245 unsigned srcb;
1246 int imm;
1248 switch (get_Opcode_Y1(bundle)) {
1249 case RRR_1_OPCODE_Y1:
1250 if (ext == UNARY_RRR_1_OPCODE_Y0) {
1251 ext = get_UnaryOpcodeExtension_Y1(bundle);
1252 return gen_rr_opcode(dc, OE(opc, ext, Y1), dest, srca);
1254 /* fallthru */
1255 case RRR_0_OPCODE_Y1:
1256 case RRR_2_OPCODE_Y1:
1257 case RRR_3_OPCODE_Y1:
1258 case RRR_4_OPCODE_Y1:
1259 case RRR_5_OPCODE_Y1:
1260 case RRR_6_OPCODE_Y1:
1261 case RRR_7_OPCODE_Y1:
1262 srcb = get_SrcB_Y1(bundle);
1263 return gen_rrr_opcode(dc, OE(opc, ext, Y1), dest, srca, srcb);
1265 case SHIFT_OPCODE_Y1:
1266 ext = get_ShiftOpcodeExtension_Y1(bundle);
1267 imm = get_ShAmt_Y1(bundle);
1268 return gen_rri_opcode(dc, OE(opc, ext, Y1), dest, srca, imm);
1270 case ADDI_OPCODE_Y1:
1271 case ADDXI_OPCODE_Y1:
1272 case ANDI_OPCODE_Y1:
1273 case CMPEQI_OPCODE_Y1:
1274 case CMPLTSI_OPCODE_Y1:
1275 imm = (int8_t)get_Imm8_Y1(bundle);
1276 return gen_rri_opcode(dc, OE(opc, 0, Y1), dest, srca, imm);
1278 default:
1279 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
/*
 * Decode the Y2 pipe of a Y-mode bundle.  Y2 only encodes loads and
 * stores; the Mode field disambiguates opcodes shared across modes,
 * so dispatch is on the (opcode, mode) pair via OEY2.
 */
static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned mode = get_Mode(bundle);
    unsigned opc = get_Opcode_Y2(bundle);
    unsigned srca = get_SrcA_Y2(bundle);
    /* The SrcBDest field is the destination for loads and the source
       data register for stores. */
    unsigned srcbdest = get_SrcBDest_Y2(bundle);
    const char *mnemonic;
    TCGMemOp memop;

    switch (OEY2(opc, mode)) {
    /* Each load case selects the memory-op size/sign and then jumps
       to the shared do_load tail. */
    case OEY2(LD1S_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_SB;
        mnemonic = "ld1s";
        goto do_load;
    case OEY2(LD1U_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_UB;
        mnemonic = "ld1u";
        goto do_load;
    case OEY2(LD2S_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_TESW;
        mnemonic = "ld2s";
        goto do_load;
    case OEY2(LD2U_OPCODE_Y2, MODE_OPCODE_YA2):
        memop = MO_TEUW;
        mnemonic = "ld2u";
        goto do_load;
    case OEY2(LD4S_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TESL;
        mnemonic = "ld4s";
        goto do_load;
    case OEY2(LD4U_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TEUL;
        mnemonic = "ld4u";
        goto do_load;
    case OEY2(LD_OPCODE_Y2, MODE_OPCODE_YB2):
        memop = MO_TEQ;
        mnemonic = "ld";
    do_load:
        tcg_gen_qemu_ld_tl(dest_gr(dc, srcbdest), load_gr(dc, srca),
                           dc->mmuidx, memop);
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
                      reg_names[srcbdest], reg_names[srca]);
        return TILEGX_EXCP_NONE;

    /* Stores delegate to gen_st_opcode; Y2 stores have no separate
       dest field, hence the 0 argument. */
    case OEY2(ST1_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_UB, "st1");
    case OEY2(ST2_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUW, "st2");
    case OEY2(ST4_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUL, "st4");
    case OEY2(ST_OPCODE_Y2, MODE_OPCODE_YC2):
        return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEQ, "st");

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
1341 static TileExcp decode_x0(DisasContext *dc, tilegx_bundle_bits bundle)
1343 unsigned opc = get_Opcode_X0(bundle);
1344 unsigned dest = get_Dest_X0(bundle);
1345 unsigned srca = get_SrcA_X0(bundle);
1346 unsigned ext, srcb, bfs, bfe;
1347 int imm;
1349 switch (opc) {
1350 case RRR_0_OPCODE_X0:
1351 ext = get_RRROpcodeExtension_X0(bundle);
1352 if (ext == UNARY_RRR_0_OPCODE_X0) {
1353 ext = get_UnaryOpcodeExtension_X0(bundle);
1354 return gen_rr_opcode(dc, OE(opc, ext, X0), dest, srca);
1356 srcb = get_SrcB_X0(bundle);
1357 return gen_rrr_opcode(dc, OE(opc, ext, X0), dest, srca, srcb);
1359 case SHIFT_OPCODE_X0:
1360 ext = get_ShiftOpcodeExtension_X0(bundle);
1361 imm = get_ShAmt_X0(bundle);
1362 return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);
1364 case IMM8_OPCODE_X0:
1365 ext = get_Imm8OpcodeExtension_X0(bundle);
1366 imm = (int8_t)get_Imm8_X0(bundle);
1367 return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);
1369 case BF_OPCODE_X0:
1370 ext = get_BFOpcodeExtension_X0(bundle);
1371 bfs = get_BFStart_X0(bundle);
1372 bfe = get_BFEnd_X0(bundle);
1373 return gen_bf_opcode_x0(dc, ext, dest, srca, bfs, bfe);
1375 case ADDLI_OPCODE_X0:
1376 case SHL16INSLI_OPCODE_X0:
1377 case ADDXLI_OPCODE_X0:
1378 imm = (int16_t)get_Imm16_X0(bundle);
1379 return gen_rri_opcode(dc, OE(opc, 0, X0), dest, srca, imm);
1381 default:
1382 return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
/*
 * Decode the X1 pipe of an X-mode bundle.  Besides the usual
 * RRR/RRI forms, X1 carries all stores, branches and jumps.
 */
static TileExcp decode_x1(DisasContext *dc, tilegx_bundle_bits bundle)
{
    unsigned opc = get_Opcode_X1(bundle);
    unsigned dest = get_Dest_X1(bundle);
    unsigned srca = get_SrcA_X1(bundle);
    unsigned ext, srcb;
    int imm;

    switch (opc) {
    case RRR_0_OPCODE_X1:
        ext = get_RRROpcodeExtension_X1(bundle);
        srcb = get_SrcB_X1(bundle);
        switch (ext) {
        case UNARY_RRR_0_OPCODE_X1:
            /* The unary extension encodes two-operand insns. */
            ext = get_UnaryOpcodeExtension_X1(bundle);
            return gen_rr_opcode(dc, OE(opc, ext, X1), dest, srca);
        /* Store forms are peeled off here; the (possibly non-temporal
           "nt") variants use the same memory ops. */
        case ST1_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "st1");
        case ST2_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "st2");
        case ST4_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "st4");
        case STNT1_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "stnt1");
        case STNT2_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "stnt2");
        case STNT4_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "stnt4");
        case STNT_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "stnt");
        case ST_RRR_0_OPCODE_X1:
            return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "st");
        }
        /* Everything else is a plain three-register operation. */
        return gen_rrr_opcode(dc, OE(opc, ext, X1), dest, srca, srcb);

    case SHIFT_OPCODE_X1:
        ext = get_ShiftOpcodeExtension_X1(bundle);
        imm = get_ShAmt_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);

    case IMM8_OPCODE_X1:
        ext = get_Imm8OpcodeExtension_X1(bundle);
        /* For the store-and-add forms, the immediate is encoded in the
           Dest field; the generic RRI path below re-extracts it from
           the Imm8 field instead. */
        imm = (int8_t)get_Dest_Imm8_X1(bundle);
        srcb = get_SrcB_X1(bundle);
        switch (ext) {
        case ST1_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "st1_add");
        case ST2_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "st2_add");
        case ST4_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "st4_add");
        case STNT1_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "stnt1_add");
        case STNT2_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "stnt2_add");
        case STNT4_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "stnt4_add");
        case STNT_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "stnt_add");
        case ST_ADD_IMM8_OPCODE_X1:
            return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "st_add");
        }
        imm = (int8_t)get_Imm8_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);

    case BRANCH_OPCODE_X1:
        ext = get_BrType_X1(bundle);
        /* 17-bit signed displacement, in bundles. */
        imm = sextract32(get_BrOff_X1(bundle), 0, 17);
        return gen_branch_opcode_x1(dc, ext, srca, imm);

    case JUMP_OPCODE_X1:
        ext = get_JumpOpcodeExtension_X1(bundle);
        /* 27-bit signed displacement, in bundles. */
        imm = sextract32(get_JumpOff_X1(bundle), 0, 27);
        return gen_jump_opcode_x1(dc, ext, imm);

    case ADDLI_OPCODE_X1:
    case SHL16INSLI_OPCODE_X1:
    case ADDXLI_OPCODE_X1:
        /* 16-bit immediate, sign-extended. */
        imm = (int16_t)get_Imm16_X1(bundle);
        return gen_rri_opcode(dc, OE(opc, 0, X1), dest, srca, imm);

    default:
        return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
    }
}
1472 static void notice_excp(DisasContext *dc, uint64_t bundle,
1473 const char *type, TileExcp excp)
1475 if (likely(excp == TILEGX_EXCP_NONE)) {
1476 return;
1478 gen_exception(dc, excp);
1479 if (excp == TILEGX_EXCP_OPCODE_UNIMPLEMENTED) {
1480 qemu_log_mask(LOG_UNIMP, "UNIMP %s, [" FMT64X "]\n", type, bundle);
/*
 * Translate a single 64-bit instruction bundle.
 *
 * Register writes within a bundle are buffered in dc->wb temporaries
 * and flushed to the global registers only after every pipe has been
 * decoded, so that all pipes read the pre-bundle register values.
 * Any branch recorded in dc->jmp is likewise emitted at the end.
 */
static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
{
    int i;

    /* Reset the write-back buffer for this bundle. */
    for (i = 0; i < ARRAY_SIZE(dc->wb); i++) {
        DisasContextTemp *wb = &dc->wb[i];
        wb->reg = TILEGX_R_NOREG;
        TCGV_UNUSED_I64(wb->val);
    }
    dc->num_wb = 0;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    /* Y-mode bundles have three pipes (y0/y1/y2); X-mode has two. */
    qemu_log_mask(CPU_LOG_TB_IN_ASM, " %" PRIx64 ": { ", dc->pc);
    if (get_Mode(bundle)) {
        notice_excp(dc, bundle, "y0", decode_y0(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "y1", decode_y1(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "y2", decode_y2(dc, bundle));
    } else {
        notice_excp(dc, bundle, "x0", decode_x0(dc, bundle));
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
        notice_excp(dc, bundle, "x1", decode_x1(dc, bundle));
    }
    qemu_log_mask(CPU_LOG_TB_IN_ASM, " }\n");

    /* Flush buffered register writes.  Note this walks the buffer in
       reverse allocation order; entries with reg >= TILEGX_R_COUNT
       (e.g. the zero register) are discarded without a store. */
    for (i = dc->num_wb - 1; i >= 0; --i) {
        DisasContextTemp *wb = &dc->wb[i];
        if (wb->reg < TILEGX_R_COUNT) {
            tcg_gen_mov_i64(cpu_regs[wb->reg], wb->val);
        }
        tcg_temp_free_i64(wb->val);
    }

    /* Emit any branch recorded by this bundle and end the TB. */
    if (dc->jmp.cond != TCG_COND_NEVER) {
        if (dc->jmp.cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(cpu_pc, dc->jmp.dest);
        } else {
            /* Conditional: select between target and fall-through pc. */
            TCGv next = tcg_const_i64(dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
            tcg_gen_movcond_i64(dc->jmp.cond, cpu_pc,
                                dc->jmp.val1, load_zero(dc),
                                dc->jmp.dest, next);
            tcg_temp_free_i64(dc->jmp.val1);
            tcg_temp_free_i64(next);
        }
        tcg_temp_free_i64(dc->jmp.dest);
        tcg_gen_exit_tb(0);
        dc->exit_tb = true;
    }
}
/*
 * Translate a block of guest bundles into TCG ops.
 * When @search_pc is true, also record per-op pc/icount metadata in
 * tcg_ctx.gen_opc_* so restore_state_to_opc can map back from a host
 * fault to a guest pc.
 */
static inline void gen_intermediate_code_internal(TileGXCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    DisasContext ctx;
    DisasContext *dc = &ctx;
    CPUState *cs = CPU(cpu);
    CPUTLGState *env = &cpu->env;
    uint64_t pc_start = tb->pc;
    uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    int j, lj = -1;
    int num_insns = 0;
    int max_insns = tb->cflags & CF_COUNT_MASK;

    dc->pc = pc_start;
    dc->mmuidx = 0;
    dc->exit_tb = false;
    dc->jmp.cond = TCG_COND_NEVER;
    TCGV_UNUSED_I64(dc->jmp.dest);
    TCGV_UNUSED_I64(dc->jmp.val1);
    TCGV_UNUSED_I64(dc->zero);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
    }
    if (!max_insns) {
        max_insns = CF_COUNT_MASK;
    }
    /* Single-stepping translates one bundle per TB. */
    if (cs->singlestep_enabled || singlestep) {
        max_insns = 1;
    }
    gen_tb_start(tb);

    while (1) {
        if (search_pc) {
            /* Record pc/icount for each TCG op generated so far. */
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        translate_one_bundle(dc, cpu_ldq_data(env, dc->pc));

        if (dc->exit_tb) {
            /* PC updated and EXIT_TB/GOTO_TB/exception emitted. */
            break;
        }
        dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES;
        if (++num_insns >= max_insns
            || dc->pc >= next_page_start
            || tcg_op_buf_full()) {
            /* Ending the TB due to TB size or page boundary. Set PC. */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            tcg_gen_exit_tb(0);
            break;
        }
    }

    gen_tb_end(tb, num_insns);
    if (search_pc) {
        /* Zero-fill metadata for any trailing ops. */
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
}
1616 void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
1618 gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, false);
1621 void gen_intermediate_code_pc(CPUTLGState *env, struct TranslationBlock *tb)
1623 gen_intermediate_code_internal(tilegx_env_get_cpu(env), tb, true);
/* Restore the architectural pc from the per-op metadata recorded by
   gen_intermediate_code_pc, given the TCG op index @pc_pos. */
void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}
1631 void tilegx_tcg_init(void)
1633 int i;
1635 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1636 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUTLGState, pc), "pc");
1637 for (i = 0; i < TILEGX_R_COUNT; i++) {
1638 cpu_regs[i] = tcg_global_mem_new_i64(TCG_AREG0,
1639 offsetof(CPUTLGState, regs[i]),
1640 reg_names[i]);