2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
39 static TCGv cpu_gpr_a
[16];
40 static TCGv cpu_gpr_d
[16];
42 static TCGv cpu_PSW_C
;
43 static TCGv cpu_PSW_V
;
44 static TCGv cpu_PSW_SV
;
45 static TCGv cpu_PSW_AV
;
46 static TCGv cpu_PSW_SAV
;
48 static TCGv_ptr cpu_env
;
50 #include "exec/gen-icount.h"
52 static const char *regnames_a
[] = {
53 "a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
54 "a6" , "a7" , "a8" , "a9" , "sp" , "a11" ,
55 "a12" , "a13" , "a14" , "a15",
58 static const char *regnames_d
[] = {
59 "d0" , "d1" , "d2" , "d3" , "d4" , "d5" ,
60 "d6" , "d7" , "d8" , "d9" , "d10" , "d11" ,
61 "d12" , "d13" , "d14" , "d15",
64 typedef struct DisasContext
{
65 struct TranslationBlock
*tb
;
66 target_ulong pc
, saved_pc
, next_pc
;
68 int singlestep_enabled
;
69 /* Routine used to access memory */
71 uint32_t hflags
, saved_hflags
;
83 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
84 fprintf_function cpu_fprintf
, int flags
)
86 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
87 CPUTriCoreState
*env
= &cpu
->env
;
90 cpu_fprintf(f
, "PC=%08x\n", env
->PC
);
91 for (i
= 0; i
< 16; ++i
) {
93 cpu_fprintf(f
, "GPR A%02d:", i
);
95 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_a
[i
], env
->gpr_a
[i
]);
97 for (i
= 0; i
< 16; ++i
) {
99 cpu_fprintf(f
, "GPR D%02d:", i
);
101 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_d
[i
], env
->gpr_d
[i
]);
107 * Functions to generate micro-ops
110 /* Makros for generating helpers */
/* Invoke helper `name` with cpu_env and one 32-bit immediate argument,
 * materializing and freeing the constant TCG temporary around the call. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)
118 /* Functions for load/save to/from memory */
120 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
121 int16_t con
, TCGMemOp mop
)
123 TCGv temp
= tcg_temp_new();
124 tcg_gen_addi_tl(temp
, r2
, con
);
125 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
129 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
130 int16_t con
, TCGMemOp mop
)
132 TCGv temp
= tcg_temp_new();
133 tcg_gen_addi_tl(temp
, r2
, con
);
134 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
138 /* Functions for arithmetic instructions */
140 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
142 TCGv t0
= tcg_temp_new_i32();
143 TCGv result
= tcg_temp_new_i32();
144 /* Addition and set V/SV bits */
145 tcg_gen_add_tl(result
, r1
, r2
);
147 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
148 tcg_gen_xor_tl(t0
, r1
, r2
);
149 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
151 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
152 /* Calc AV/SAV bits */
153 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
154 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
156 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
157 /* write back result */
158 tcg_gen_mov_tl(ret
, result
);
160 tcg_temp_free(result
);
164 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
166 TCGv temp
= tcg_const_i32(r2
);
167 gen_add_d(ret
, r1
, temp
);
171 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
174 TCGv temp
= tcg_temp_new();
175 TCGv temp2
= tcg_temp_new();
176 TCGv result
= tcg_temp_new();
177 TCGv mask
= tcg_temp_new();
178 TCGv t0
= tcg_const_i32(0);
180 /* create mask for sticky bits */
181 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
182 tcg_gen_shli_tl(mask
, mask
, 31);
184 tcg_gen_add_tl(result
, r1
, r2
);
186 tcg_gen_xor_tl(temp
, result
, r1
);
187 tcg_gen_xor_tl(temp2
, r1
, r2
);
188 tcg_gen_andc_tl(temp
, temp
, temp2
);
189 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
191 tcg_gen_and_tl(temp
, temp
, mask
);
192 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
194 tcg_gen_add_tl(temp
, result
, result
);
195 tcg_gen_xor_tl(temp
, temp
, result
);
196 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
198 tcg_gen_and_tl(temp
, temp
, mask
);
199 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
200 /* write back result */
201 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r3
);
205 tcg_temp_free(temp2
);
206 tcg_temp_free(result
);
210 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
213 TCGv temp
= tcg_const_i32(r2
);
214 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
218 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
220 TCGv temp
= tcg_temp_new_i32();
221 TCGv result
= tcg_temp_new_i32();
223 tcg_gen_sub_tl(result
, r1
, r2
);
225 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
226 tcg_gen_xor_tl(temp
, r1
, r2
);
227 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
229 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
231 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
232 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
234 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
235 /* write back result */
236 tcg_gen_mov_tl(ret
, result
);
239 tcg_temp_free(result
);
242 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
244 TCGv high
= tcg_temp_new();
245 TCGv low
= tcg_temp_new();
247 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
248 tcg_gen_mov_tl(ret
, low
);
250 tcg_gen_sari_tl(low
, low
, 31);
251 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
252 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
254 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
256 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
257 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
259 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
265 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
267 TCGv sat_neg
= tcg_const_i32(low
);
268 TCGv temp
= tcg_const_i32(up
);
270 /* sat_neg = (arg < low ) ? low : arg; */
271 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
273 /* ret = (sat_neg > up ) ? up : sat_neg; */
274 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
276 tcg_temp_free(sat_neg
);
280 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
282 TCGv temp
= tcg_const_i32(up
);
283 /* sat_neg = (arg > up ) ? up : arg; */
284 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
288 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
290 if (shift_count
== -32) {
291 tcg_gen_movi_tl(ret
, 0);
292 } else if (shift_count
>= 0) {
293 tcg_gen_shli_tl(ret
, r1
, shift_count
);
295 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
299 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
301 uint32_t msk
, msk_start
;
302 TCGv temp
= tcg_temp_new();
303 TCGv temp2
= tcg_temp_new();
304 TCGv t_0
= tcg_const_i32(0);
306 if (shift_count
== 0) {
307 /* Clear PSW.C and PSW.V */
308 tcg_gen_movi_tl(cpu_PSW_C
, 0);
309 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
310 tcg_gen_mov_tl(ret
, r1
);
311 } else if (shift_count
== -32) {
313 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
314 /* fill ret completly with sign bit */
315 tcg_gen_sari_tl(ret
, r1
, 31);
317 tcg_gen_movi_tl(cpu_PSW_V
, 0);
318 } else if (shift_count
> 0) {
319 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
320 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
323 msk_start
= 32 - shift_count
;
324 msk
= ((1 << shift_count
) - 1) << msk_start
;
325 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
327 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
328 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
329 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
330 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
332 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
334 tcg_gen_shli_tl(ret
, r1
, shift_count
);
336 tcg_temp_free(t_max
);
337 tcg_temp_free(t_min
);
340 tcg_gen_movi_tl(cpu_PSW_V
, 0);
342 msk
= (1 << -shift_count
) - 1;
343 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
345 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
347 /* calc av overflow bit */
348 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
349 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
350 /* calc sav overflow bit */
351 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
354 tcg_temp_free(temp2
);
358 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
360 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
363 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
365 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
368 /* helpers for generating program flow micro-ops */
370 static inline void gen_save_pc(target_ulong pc
)
372 tcg_gen_movi_tl(cpu_PC
, pc
);
375 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
377 TranslationBlock
*tb
;
379 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
380 likely(!ctx
->singlestep_enabled
)) {
383 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
386 if (ctx
->singlestep_enabled
) {
387 /* raise exception debug */
393 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
394 TCGv r2
, int16_t address
)
397 jumpLabel
= gen_new_label();
398 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
400 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
402 gen_set_label(jumpLabel
);
403 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
406 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
407 int r2
, int16_t address
)
409 TCGv temp
= tcg_const_i32(r2
);
410 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
414 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
417 l1
= gen_new_label();
419 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
420 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
421 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
423 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
426 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
427 int r2
, int32_t constant
, int32_t offset
)
432 /* SB-format jumps */
435 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
437 case OPC1_16_SB_CALL
:
438 gen_helper_1arg(call
, ctx
->next_pc
);
439 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
442 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
445 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
447 /* SBC-format jumps */
448 case OPC1_16_SBC_JEQ
:
449 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
451 case OPC1_16_SBC_JNE
:
452 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
454 /* SBRN-format jumps */
455 case OPC1_16_SBRN_JZ_T
:
456 temp
= tcg_temp_new();
457 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
458 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
461 case OPC1_16_SBRN_JNZ_T
:
462 temp
= tcg_temp_new();
463 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
464 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
467 /* SBR-format jumps */
468 case OPC1_16_SBR_JEQ
:
469 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
472 case OPC1_16_SBR_JNE
:
473 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
476 case OPC1_16_SBR_JNZ
:
477 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
479 case OPC1_16_SBR_JNZ_A
:
480 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
482 case OPC1_16_SBR_JGEZ
:
483 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
485 case OPC1_16_SBR_JGTZ
:
486 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
488 case OPC1_16_SBR_JLEZ
:
489 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
491 case OPC1_16_SBR_JLTZ
:
492 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
495 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
497 case OPC1_16_SBR_JZ_A
:
498 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
500 case OPC1_16_SBR_LOOP
:
501 gen_loop(ctx
, r1
, offset
* 2 - 32);
503 /* SR-format jumps */
505 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
509 gen_helper_ret(cpu_env
);
513 printf("Branch Error at %x\n", ctx
->pc
);
515 ctx
->bstate
= BS_BRANCH
;
520 * Functions for decoding instructions
523 static void decode_src_opc(DisasContext
*ctx
, int op1
)
529 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
530 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
533 case OPC1_16_SRC_ADD
:
534 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
536 case OPC1_16_SRC_ADD_A15
:
537 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
539 case OPC1_16_SRC_ADD_15A
:
540 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
542 case OPC1_16_SRC_ADD_A
:
543 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
545 case OPC1_16_SRC_CADD
:
546 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
549 case OPC1_16_SRC_CADDN
:
550 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
553 case OPC1_16_SRC_CMOV
:
554 temp
= tcg_const_tl(0);
555 temp2
= tcg_const_tl(const4
);
556 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
557 temp2
, cpu_gpr_d
[r1
]);
559 tcg_temp_free(temp2
);
561 case OPC1_16_SRC_CMOVN
:
562 temp
= tcg_const_tl(0);
563 temp2
= tcg_const_tl(const4
);
564 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
565 temp2
, cpu_gpr_d
[r1
]);
567 tcg_temp_free(temp2
);
570 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
574 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
577 case OPC1_16_SRC_MOV
:
578 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
580 case OPC1_16_SRC_MOV_A
:
581 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
582 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
585 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
587 case OPC1_16_SRC_SHA
:
588 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
593 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
598 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
599 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
602 case OPC1_16_SRR_ADD
:
603 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
605 case OPC1_16_SRR_ADD_A15
:
606 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
608 case OPC1_16_SRR_ADD_15A
:
609 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
611 case OPC1_16_SRR_ADD_A
:
612 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
614 case OPC1_16_SRR_ADDS
:
615 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
617 case OPC1_16_SRR_AND
:
618 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
620 case OPC1_16_SRR_CMOV
:
621 temp
= tcg_const_tl(0);
622 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
623 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
626 case OPC1_16_SRR_CMOVN
:
627 temp
= tcg_const_tl(0);
628 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
629 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
633 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
637 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
640 case OPC1_16_SRR_MOV
:
641 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
643 case OPC1_16_SRR_MOV_A
:
644 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
646 case OPC1_16_SRR_MOV_AA
:
647 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
649 case OPC1_16_SRR_MOV_D
:
650 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
652 case OPC1_16_SRR_MUL
:
653 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
656 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
658 case OPC1_16_SRR_SUB
:
659 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
661 case OPC1_16_SRR_SUB_A15B
:
662 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
664 case OPC1_16_SRR_SUB_15AB
:
665 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
667 case OPC1_16_SRR_SUBS
:
668 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
670 case OPC1_16_SRR_XOR
:
671 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
676 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
680 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
681 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
684 case OPC1_16_SSR_ST_A
:
685 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
687 case OPC1_16_SSR_ST_A_POSTINC
:
688 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
689 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
691 case OPC1_16_SSR_ST_B
:
692 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
694 case OPC1_16_SSR_ST_B_POSTINC
:
695 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
696 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
698 case OPC1_16_SSR_ST_H
:
699 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
701 case OPC1_16_SSR_ST_H_POSTINC
:
702 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
703 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
705 case OPC1_16_SSR_ST_W
:
706 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
708 case OPC1_16_SSR_ST_W_POSTINC
:
709 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
710 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
715 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
719 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
723 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
725 case OPC1_16_SC_BISR
:
726 gen_helper_1arg(bisr
, const16
& 0xff);
728 case OPC1_16_SC_LD_A
:
729 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
731 case OPC1_16_SC_LD_W
:
732 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
735 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
738 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
740 case OPC1_16_SC_ST_A
:
741 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
743 case OPC1_16_SC_ST_W
:
744 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
746 case OPC1_16_SC_SUB_A
:
747 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
752 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
756 r1
= MASK_OP_SLR_D(ctx
->opcode
);
757 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
761 case OPC1_16_SLR_LD_A
:
762 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
764 case OPC1_16_SLR_LD_A_POSTINC
:
765 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
766 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
768 case OPC1_16_SLR_LD_BU
:
769 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
771 case OPC1_16_SLR_LD_BU_POSTINC
:
772 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
773 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
775 case OPC1_16_SLR_LD_H
:
776 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
778 case OPC1_16_SLR_LD_H_POSTINC
:
779 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
780 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
782 case OPC1_16_SLR_LD_W
:
783 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
785 case OPC1_16_SLR_LD_W_POSTINC
:
786 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
787 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
792 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
797 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
798 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
802 case OPC1_16_SRO_LD_A
:
803 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
805 case OPC1_16_SRO_LD_BU
:
806 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
808 case OPC1_16_SRO_LD_H
:
809 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
811 case OPC1_16_SRO_LD_W
:
812 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
814 case OPC1_16_SRO_ST_A
:
815 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
817 case OPC1_16_SRO_ST_B
:
818 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
820 case OPC1_16_SRO_ST_H
:
821 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
823 case OPC1_16_SRO_ST_W
:
824 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
829 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
832 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
838 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
841 gen_helper_rfe(cpu_env
);
843 ctx
->bstate
= BS_BRANCH
;
845 case OPC2_16_SR_DEBUG
:
846 /* raise EXCP_DEBUG */
851 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
857 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
858 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
861 case OPC2_16_SR_RSUB
:
862 /* overflow only if r1 = -0x80000000 */
863 temp
= tcg_const_i32(-0x80000000);
865 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
866 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
868 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
870 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
872 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
873 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
875 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
878 case OPC2_16_SR_SAT_B
:
879 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
881 case OPC2_16_SR_SAT_BU
:
882 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
884 case OPC2_16_SR_SAT_H
:
885 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
887 case OPC2_16_SR_SAT_HU
:
888 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
893 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
901 op1
= MASK_OP_MAJOR(ctx
->opcode
);
903 /* handle ADDSC.A opcode only being 6 bit long */
904 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
905 op1
= OPC1_16_SRRS_ADDSC_A
;
909 case OPC1_16_SRC_ADD
:
910 case OPC1_16_SRC_ADD_A15
:
911 case OPC1_16_SRC_ADD_15A
:
912 case OPC1_16_SRC_ADD_A
:
913 case OPC1_16_SRC_CADD
:
914 case OPC1_16_SRC_CADDN
:
915 case OPC1_16_SRC_CMOV
:
916 case OPC1_16_SRC_CMOVN
:
919 case OPC1_16_SRC_MOV
:
920 case OPC1_16_SRC_MOV_A
:
922 case OPC1_16_SRC_SHA
:
923 decode_src_opc(ctx
, op1
);
926 case OPC1_16_SRR_ADD
:
927 case OPC1_16_SRR_ADD_A15
:
928 case OPC1_16_SRR_ADD_15A
:
929 case OPC1_16_SRR_ADD_A
:
930 case OPC1_16_SRR_ADDS
:
931 case OPC1_16_SRR_AND
:
932 case OPC1_16_SRR_CMOV
:
933 case OPC1_16_SRR_CMOVN
:
936 case OPC1_16_SRR_MOV
:
937 case OPC1_16_SRR_MOV_A
:
938 case OPC1_16_SRR_MOV_AA
:
939 case OPC1_16_SRR_MOV_D
:
940 case OPC1_16_SRR_MUL
:
942 case OPC1_16_SRR_SUB
:
943 case OPC1_16_SRR_SUB_A15B
:
944 case OPC1_16_SRR_SUB_15AB
:
945 case OPC1_16_SRR_SUBS
:
946 case OPC1_16_SRR_XOR
:
947 decode_srr_opc(ctx
, op1
);
950 case OPC1_16_SSR_ST_A
:
951 case OPC1_16_SSR_ST_A_POSTINC
:
952 case OPC1_16_SSR_ST_B
:
953 case OPC1_16_SSR_ST_B_POSTINC
:
954 case OPC1_16_SSR_ST_H
:
955 case OPC1_16_SSR_ST_H_POSTINC
:
956 case OPC1_16_SSR_ST_W
:
957 case OPC1_16_SSR_ST_W_POSTINC
:
958 decode_ssr_opc(ctx
, op1
);
961 case OPC1_16_SRRS_ADDSC_A
:
962 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
963 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
964 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
965 temp
= tcg_temp_new();
966 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
967 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
971 case OPC1_16_SLRO_LD_A
:
972 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
973 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
974 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
976 case OPC1_16_SLRO_LD_BU
:
977 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
978 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
979 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
981 case OPC1_16_SLRO_LD_H
:
982 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
983 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
984 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
986 case OPC1_16_SLRO_LD_W
:
987 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
988 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
989 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
992 case OPC1_16_SB_CALL
:
996 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
997 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
1000 case OPC1_16_SBC_JEQ
:
1001 case OPC1_16_SBC_JNE
:
1002 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
1003 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
1004 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
1007 case OPC1_16_SBRN_JNZ_T
:
1008 case OPC1_16_SBRN_JZ_T
:
1009 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
1010 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
1011 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
1014 case OPC1_16_SBR_JEQ
:
1015 case OPC1_16_SBR_JGEZ
:
1016 case OPC1_16_SBR_JGTZ
:
1017 case OPC1_16_SBR_JLEZ
:
1018 case OPC1_16_SBR_JLTZ
:
1019 case OPC1_16_SBR_JNE
:
1020 case OPC1_16_SBR_JNZ
:
1021 case OPC1_16_SBR_JNZ_A
:
1022 case OPC1_16_SBR_JZ
:
1023 case OPC1_16_SBR_JZ_A
:
1024 case OPC1_16_SBR_LOOP
:
1025 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
1026 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
1027 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
1030 case OPC1_16_SC_AND
:
1031 case OPC1_16_SC_BISR
:
1032 case OPC1_16_SC_LD_A
:
1033 case OPC1_16_SC_LD_W
:
1034 case OPC1_16_SC_MOV
:
1036 case OPC1_16_SC_ST_A
:
1037 case OPC1_16_SC_ST_W
:
1038 case OPC1_16_SC_SUB_A
:
1039 decode_sc_opc(ctx
, op1
);
1042 case OPC1_16_SLR_LD_A
:
1043 case OPC1_16_SLR_LD_A_POSTINC
:
1044 case OPC1_16_SLR_LD_BU
:
1045 case OPC1_16_SLR_LD_BU_POSTINC
:
1046 case OPC1_16_SLR_LD_H
:
1047 case OPC1_16_SLR_LD_H_POSTINC
:
1048 case OPC1_16_SLR_LD_W
:
1049 case OPC1_16_SLR_LD_W_POSTINC
:
1050 decode_slr_opc(ctx
, op1
);
1053 case OPC1_16_SRO_LD_A
:
1054 case OPC1_16_SRO_LD_BU
:
1055 case OPC1_16_SRO_LD_H
:
1056 case OPC1_16_SRO_LD_W
:
1057 case OPC1_16_SRO_ST_A
:
1058 case OPC1_16_SRO_ST_B
:
1059 case OPC1_16_SRO_ST_H
:
1060 case OPC1_16_SRO_ST_W
:
1061 decode_sro_opc(ctx
, op1
);
1064 case OPC1_16_SSRO_ST_A
:
1065 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1066 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1067 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
1069 case OPC1_16_SSRO_ST_B
:
1070 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1071 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1072 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
1074 case OPC1_16_SSRO_ST_H
:
1075 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1076 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1077 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
1079 case OPC1_16_SSRO_ST_W
:
1080 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1081 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1082 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
1085 case OPCM_16_SR_SYSTEM
:
1086 decode_sr_system(env
, ctx
);
1088 case OPCM_16_SR_ACCU
:
1089 decode_sr_accu(env
, ctx
);
1092 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
1093 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
1095 case OPC1_16_SR_NOT
:
1096 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
1097 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
1102 static void decode_32Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
1106 static void decode_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int *is_branch
)
1108 /* 16-Bit Instruction */
1109 if ((ctx
->opcode
& 0x1) == 0) {
1110 ctx
->next_pc
= ctx
->pc
+ 2;
1111 decode_16Bit_opc(env
, ctx
);
1112 /* 32-Bit Instruction */
1114 ctx
->next_pc
= ctx
->pc
+ 4;
1115 decode_32Bit_opc(env
, ctx
);
1120 gen_intermediate_code_internal(TriCoreCPU
*cpu
, struct TranslationBlock
*tb
,
1123 CPUState
*cs
= CPU(cpu
);
1124 CPUTriCoreState
*env
= &cpu
->env
;
1126 target_ulong pc_start
;
1128 uint16_t *gen_opc_end
;
1131 qemu_log("search pc %d\n", search_pc
);
1136 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
1140 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
1141 ctx
.bstate
= BS_NONE
;
1142 ctx
.mem_idx
= cpu_mmu_index(env
);
1144 tcg_clear_temp_count();
1146 while (ctx
.bstate
== BS_NONE
) {
1147 ctx
.opcode
= cpu_ldl_code(env
, ctx
.pc
);
1148 decode_opc(env
, &ctx
, 0);
1152 if (tcg_ctx
.gen_opc_ptr
>= gen_opc_end
) {
1153 gen_save_pc(ctx
.next_pc
);
1158 gen_save_pc(ctx
.next_pc
);
1162 ctx
.pc
= ctx
.next_pc
;
1165 gen_tb_end(tb
, num_insns
);
1166 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
1168 printf("done_generating search pc\n");
1170 tb
->size
= ctx
.pc
- pc_start
;
1171 tb
->icount
= num_insns
;
1173 if (tcg_check_temp_count()) {
1174 printf("LEAK at %08x\n", env
->PC
);
1178 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1179 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
1180 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
1187 gen_intermediate_code(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
1189 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, false);
1193 gen_intermediate_code_pc(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
1195 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, true);
1199 restore_state_to_opc(CPUTriCoreState
*env
, TranslationBlock
*tb
, int pc_pos
)
1201 env
->PC
= tcg_ctx
.gen_opc_pc
[pc_pos
];
1209 void cpu_state_reset(CPUTriCoreState
*env
)
1211 /* Reset Regs to Default Value */
1215 static void tricore_tcg_init_csfr(void)
1217 cpu_PCXI
= tcg_global_mem_new(TCG_AREG0
,
1218 offsetof(CPUTriCoreState
, PCXI
), "PCXI");
1219 cpu_PSW
= tcg_global_mem_new(TCG_AREG0
,
1220 offsetof(CPUTriCoreState
, PSW
), "PSW");
1221 cpu_PC
= tcg_global_mem_new(TCG_AREG0
,
1222 offsetof(CPUTriCoreState
, PC
), "PC");
1223 cpu_ICR
= tcg_global_mem_new(TCG_AREG0
,
1224 offsetof(CPUTriCoreState
, ICR
), "ICR");
1227 void tricore_tcg_init(void)
1234 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
1236 for (i
= 0 ; i
< 16 ; i
++) {
1237 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
1238 offsetof(CPUTriCoreState
, gpr_a
[i
]),
1241 for (i
= 0 ; i
< 16 ; i
++) {
1242 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
1243 offsetof(CPUTriCoreState
, gpr_d
[i
]),
1246 tricore_tcg_init_csfr();
1247 /* init PSW flag cache */
1248 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
1249 offsetof(CPUTriCoreState
, PSW_USB_C
),
1251 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
1252 offsetof(CPUTriCoreState
, PSW_USB_V
),
1254 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
1255 offsetof(CPUTriCoreState
, PSW_USB_SV
),
1257 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
1258 offsetof(CPUTriCoreState
, PSW_USB_AV
),
1260 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
1261 offsetof(CPUTriCoreState
, PSW_USB_SAV
),