2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
39 static TCGv cpu_gpr_a
[16];
40 static TCGv cpu_gpr_d
[16];
42 static TCGv cpu_PSW_C
;
43 static TCGv cpu_PSW_V
;
44 static TCGv cpu_PSW_SV
;
45 static TCGv cpu_PSW_AV
;
46 static TCGv cpu_PSW_SAV
;
48 static TCGv_ptr cpu_env
;
50 #include "exec/gen-icount.h"
/* Register names used when registering the TCG globals (A10 is the
   stack pointer, hence "sp").  */
static const char *regnames_a[] = {
    "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "a8", "a9", "sp", "a11",
    "a12", "a13", "a14", "a15",
};

static const char *regnames_d[] = {
    "d0", "d1", "d2", "d3", "d4", "d5",
    "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15",
};
64 typedef struct DisasContext
{
65 struct TranslationBlock
*tb
;
66 target_ulong pc
, saved_pc
, next_pc
;
68 int singlestep_enabled
;
69 /* Routine used to access memory */
71 uint32_t hflags
, saved_hflags
;
83 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
84 fprintf_function cpu_fprintf
, int flags
)
86 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
87 CPUTriCoreState
*env
= &cpu
->env
;
90 cpu_fprintf(f
, "PC=%08x\n", env
->PC
);
91 for (i
= 0; i
< 16; ++i
) {
93 cpu_fprintf(f
, "GPR A%02d:", i
);
95 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_a
[i
], env
->gpr_a
[i
]);
97 for (i
= 0; i
< 16; ++i
) {
99 cpu_fprintf(f
, "GPR D%02d:", i
);
101 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_d
[i
], env
->gpr_d
[i
]);
/*
 * Functions to generate micro-ops
 */

/* Macros for generating helpers */

/* Invoke helper_<name>(env, arg) with @arg materialized as a constant
   i32 temporary that is freed immediately afterwards.  */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)
118 /* Functions for load/save to/from memory */
120 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
121 int16_t con
, TCGMemOp mop
)
123 TCGv temp
= tcg_temp_new();
124 tcg_gen_addi_tl(temp
, r2
, con
);
125 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
129 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
130 int16_t con
, TCGMemOp mop
)
132 TCGv temp
= tcg_temp_new();
133 tcg_gen_addi_tl(temp
, r2
, con
);
134 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
138 /* Functions for arithmetic instructions */
140 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
142 TCGv t0
= tcg_temp_new_i32();
143 TCGv result
= tcg_temp_new_i32();
144 /* Addition and set V/SV bits */
145 tcg_gen_add_tl(result
, r1
, r2
);
147 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
148 tcg_gen_xor_tl(t0
, r1
, r2
);
149 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
151 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
152 /* Calc AV/SAV bits */
153 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
154 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
156 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
157 /* write back result */
158 tcg_gen_mov_tl(ret
, result
);
160 tcg_temp_free(result
);
164 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
166 TCGv temp
= tcg_const_i32(r2
);
167 gen_add_d(ret
, r1
, temp
);
171 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
174 TCGv temp
= tcg_temp_new();
175 TCGv temp2
= tcg_temp_new();
176 TCGv result
= tcg_temp_new();
177 TCGv mask
= tcg_temp_new();
178 TCGv t0
= tcg_const_i32(0);
180 /* create mask for sticky bits */
181 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
182 tcg_gen_shli_tl(mask
, mask
, 31);
184 tcg_gen_add_tl(result
, r1
, r2
);
186 tcg_gen_xor_tl(temp
, result
, r1
);
187 tcg_gen_xor_tl(temp2
, r1
, r2
);
188 tcg_gen_andc_tl(temp
, temp
, temp2
);
189 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
191 tcg_gen_and_tl(temp
, temp
, mask
);
192 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
194 tcg_gen_add_tl(temp
, result
, result
);
195 tcg_gen_xor_tl(temp
, temp
, result
);
196 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
198 tcg_gen_and_tl(temp
, temp
, mask
);
199 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
200 /* write back result */
201 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r3
);
205 tcg_temp_free(temp2
);
206 tcg_temp_free(result
);
210 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
213 TCGv temp
= tcg_const_i32(r2
);
214 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
218 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
220 TCGv temp
= tcg_temp_new_i32();
221 TCGv result
= tcg_temp_new_i32();
223 tcg_gen_sub_tl(result
, r1
, r2
);
225 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
226 tcg_gen_xor_tl(temp
, r1
, r2
);
227 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
229 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
231 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
232 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
234 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
235 /* write back result */
236 tcg_gen_mov_tl(ret
, result
);
239 tcg_temp_free(result
);
242 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
244 TCGv high
= tcg_temp_new();
245 TCGv low
= tcg_temp_new();
247 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
248 tcg_gen_mov_tl(ret
, low
);
250 tcg_gen_sari_tl(low
, low
, 31);
251 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
252 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
254 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
256 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
257 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
259 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
265 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
267 if (shift_count
== -32) {
268 tcg_gen_movi_tl(ret
, 0);
269 } else if (shift_count
>= 0) {
270 tcg_gen_shli_tl(ret
, r1
, shift_count
);
272 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
276 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
278 uint32_t msk
, msk_start
;
279 TCGv temp
= tcg_temp_new();
280 TCGv temp2
= tcg_temp_new();
281 TCGv t_0
= tcg_const_i32(0);
283 if (shift_count
== 0) {
284 /* Clear PSW.C and PSW.V */
285 tcg_gen_movi_tl(cpu_PSW_C
, 0);
286 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
287 tcg_gen_mov_tl(ret
, r1
);
288 } else if (shift_count
== -32) {
290 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
291 /* fill ret completly with sign bit */
292 tcg_gen_sari_tl(ret
, r1
, 31);
294 tcg_gen_movi_tl(cpu_PSW_V
, 0);
295 } else if (shift_count
> 0) {
296 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
297 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
300 msk_start
= 32 - shift_count
;
301 msk
= ((1 << shift_count
) - 1) << msk_start
;
302 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
304 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
305 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
306 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
307 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
309 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
311 tcg_gen_shli_tl(ret
, r1
, shift_count
);
313 tcg_temp_free(t_max
);
314 tcg_temp_free(t_min
);
317 tcg_gen_movi_tl(cpu_PSW_V
, 0);
319 msk
= (1 << -shift_count
) - 1;
320 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
322 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
324 /* calc av overflow bit */
325 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
326 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
327 /* calc sav overflow bit */
328 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
331 tcg_temp_free(temp2
);
335 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
337 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
340 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
342 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
345 /* helpers for generating program flow micro-ops */
347 static inline void gen_save_pc(target_ulong pc
)
349 tcg_gen_movi_tl(cpu_PC
, pc
);
352 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
354 TranslationBlock
*tb
;
356 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
357 likely(!ctx
->singlestep_enabled
)) {
360 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
363 if (ctx
->singlestep_enabled
) {
364 /* raise exception debug */
370 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
371 TCGv r2
, int16_t address
)
374 jumpLabel
= gen_new_label();
375 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
377 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
379 gen_set_label(jumpLabel
);
380 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
383 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
384 int r2
, int16_t address
)
386 TCGv temp
= tcg_const_i32(r2
);
387 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
391 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
394 l1
= gen_new_label();
396 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
397 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
398 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
400 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
403 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
404 int r2
, int32_t constant
, int32_t offset
)
409 /* SB-format jumps */
412 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
414 case OPC1_16_SB_CALL
:
415 gen_helper_1arg(call
, ctx
->next_pc
);
416 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
419 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
422 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
424 /* SBC-format jumps */
425 case OPC1_16_SBC_JEQ
:
426 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
428 case OPC1_16_SBC_JNE
:
429 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
431 /* SBRN-format jumps */
432 case OPC1_16_SBRN_JZ_T
:
433 temp
= tcg_temp_new();
434 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
435 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
438 case OPC1_16_SBRN_JNZ_T
:
439 temp
= tcg_temp_new();
440 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
441 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
444 /* SBR-format jumps */
445 case OPC1_16_SBR_JEQ
:
446 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
449 case OPC1_16_SBR_JNE
:
450 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
453 case OPC1_16_SBR_JNZ
:
454 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
456 case OPC1_16_SBR_JNZ_A
:
457 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
459 case OPC1_16_SBR_JGEZ
:
460 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
462 case OPC1_16_SBR_JGTZ
:
463 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
465 case OPC1_16_SBR_JLEZ
:
466 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
468 case OPC1_16_SBR_JLTZ
:
469 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
472 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
474 case OPC1_16_SBR_JZ_A
:
475 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
477 case OPC1_16_SBR_LOOP
:
478 gen_loop(ctx
, r1
, offset
* 2 - 32);
481 printf("Branch Error at %x\n", ctx
->pc
);
483 ctx
->bstate
= BS_BRANCH
;
488 * Functions for decoding instructions
491 static void decode_src_opc(DisasContext
*ctx
, int op1
)
497 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
498 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
501 case OPC1_16_SRC_ADD
:
502 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
504 case OPC1_16_SRC_ADD_A15
:
505 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
507 case OPC1_16_SRC_ADD_15A
:
508 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
510 case OPC1_16_SRC_ADD_A
:
511 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
513 case OPC1_16_SRC_CADD
:
514 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
517 case OPC1_16_SRC_CADDN
:
518 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
521 case OPC1_16_SRC_CMOV
:
522 temp
= tcg_const_tl(0);
523 temp2
= tcg_const_tl(const4
);
524 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
525 temp2
, cpu_gpr_d
[r1
]);
527 tcg_temp_free(temp2
);
529 case OPC1_16_SRC_CMOVN
:
530 temp
= tcg_const_tl(0);
531 temp2
= tcg_const_tl(const4
);
532 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
533 temp2
, cpu_gpr_d
[r1
]);
535 tcg_temp_free(temp2
);
538 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
542 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
545 case OPC1_16_SRC_MOV
:
546 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
548 case OPC1_16_SRC_MOV_A
:
549 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
550 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
553 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
555 case OPC1_16_SRC_SHA
:
556 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
561 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
566 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
567 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
570 case OPC1_16_SRR_ADD
:
571 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
573 case OPC1_16_SRR_ADD_A15
:
574 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
576 case OPC1_16_SRR_ADD_15A
:
577 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
579 case OPC1_16_SRR_ADD_A
:
580 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
582 case OPC1_16_SRR_ADDS
:
583 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
585 case OPC1_16_SRR_AND
:
586 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
588 case OPC1_16_SRR_CMOV
:
589 temp
= tcg_const_tl(0);
590 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
591 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
594 case OPC1_16_SRR_CMOVN
:
595 temp
= tcg_const_tl(0);
596 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
597 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
601 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
605 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
608 case OPC1_16_SRR_MOV
:
609 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
611 case OPC1_16_SRR_MOV_A
:
612 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
614 case OPC1_16_SRR_MOV_AA
:
615 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
617 case OPC1_16_SRR_MOV_D
:
618 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
620 case OPC1_16_SRR_MUL
:
621 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
624 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
626 case OPC1_16_SRR_SUB
:
627 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
629 case OPC1_16_SRR_SUB_A15B
:
630 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
632 case OPC1_16_SRR_SUB_15AB
:
633 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
635 case OPC1_16_SRR_SUBS
:
636 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
638 case OPC1_16_SRR_XOR
:
639 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
644 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
648 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
649 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
652 case OPC1_16_SSR_ST_A
:
653 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
655 case OPC1_16_SSR_ST_A_POSTINC
:
656 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
657 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
659 case OPC1_16_SSR_ST_B
:
660 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
662 case OPC1_16_SSR_ST_B_POSTINC
:
663 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
664 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
666 case OPC1_16_SSR_ST_H
:
667 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
669 case OPC1_16_SSR_ST_H_POSTINC
:
670 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
671 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
673 case OPC1_16_SSR_ST_W
:
674 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
676 case OPC1_16_SSR_ST_W_POSTINC
:
677 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
678 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
683 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
687 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
691 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
693 case OPC1_16_SC_BISR
:
694 gen_helper_1arg(bisr
, const16
& 0xff);
696 case OPC1_16_SC_LD_A
:
697 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
699 case OPC1_16_SC_LD_W
:
700 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
703 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
706 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
708 case OPC1_16_SC_ST_A
:
709 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
711 case OPC1_16_SC_ST_W
:
712 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
714 case OPC1_16_SC_SUB_A
:
715 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
720 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
724 r1
= MASK_OP_SLR_D(ctx
->opcode
);
725 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
729 case OPC1_16_SLR_LD_A
:
730 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
732 case OPC1_16_SLR_LD_A_POSTINC
:
733 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
734 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
736 case OPC1_16_SLR_LD_BU
:
737 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
739 case OPC1_16_SLR_LD_BU_POSTINC
:
740 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
741 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
743 case OPC1_16_SLR_LD_H
:
744 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
746 case OPC1_16_SLR_LD_H_POSTINC
:
747 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
748 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
750 case OPC1_16_SLR_LD_W
:
751 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
753 case OPC1_16_SLR_LD_W_POSTINC
:
754 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
755 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
760 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
765 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
766 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
770 case OPC1_16_SRO_LD_A
:
771 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
773 case OPC1_16_SRO_LD_BU
:
774 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
776 case OPC1_16_SRO_LD_H
:
777 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
779 case OPC1_16_SRO_LD_W
:
780 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
782 case OPC1_16_SRO_ST_A
:
783 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
785 case OPC1_16_SRO_ST_B
:
786 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
788 case OPC1_16_SRO_ST_H
:
789 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
791 case OPC1_16_SRO_ST_W
:
792 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
797 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
805 op1
= MASK_OP_MAJOR(ctx
->opcode
);
807 /* handle ADDSC.A opcode only being 6 bit long */
808 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
809 op1
= OPC1_16_SRRS_ADDSC_A
;
813 case OPC1_16_SRC_ADD
:
814 case OPC1_16_SRC_ADD_A15
:
815 case OPC1_16_SRC_ADD_15A
:
816 case OPC1_16_SRC_ADD_A
:
817 case OPC1_16_SRC_CADD
:
818 case OPC1_16_SRC_CADDN
:
819 case OPC1_16_SRC_CMOV
:
820 case OPC1_16_SRC_CMOVN
:
823 case OPC1_16_SRC_MOV
:
824 case OPC1_16_SRC_MOV_A
:
826 case OPC1_16_SRC_SHA
:
827 decode_src_opc(ctx
, op1
);
830 case OPC1_16_SRR_ADD
:
831 case OPC1_16_SRR_ADD_A15
:
832 case OPC1_16_SRR_ADD_15A
:
833 case OPC1_16_SRR_ADD_A
:
834 case OPC1_16_SRR_ADDS
:
835 case OPC1_16_SRR_AND
:
836 case OPC1_16_SRR_CMOV
:
837 case OPC1_16_SRR_CMOVN
:
840 case OPC1_16_SRR_MOV
:
841 case OPC1_16_SRR_MOV_A
:
842 case OPC1_16_SRR_MOV_AA
:
843 case OPC1_16_SRR_MOV_D
:
844 case OPC1_16_SRR_MUL
:
846 case OPC1_16_SRR_SUB
:
847 case OPC1_16_SRR_SUB_A15B
:
848 case OPC1_16_SRR_SUB_15AB
:
849 case OPC1_16_SRR_SUBS
:
850 case OPC1_16_SRR_XOR
:
851 decode_srr_opc(ctx
, op1
);
854 case OPC1_16_SSR_ST_A
:
855 case OPC1_16_SSR_ST_A_POSTINC
:
856 case OPC1_16_SSR_ST_B
:
857 case OPC1_16_SSR_ST_B_POSTINC
:
858 case OPC1_16_SSR_ST_H
:
859 case OPC1_16_SSR_ST_H_POSTINC
:
860 case OPC1_16_SSR_ST_W
:
861 case OPC1_16_SSR_ST_W_POSTINC
:
862 decode_ssr_opc(ctx
, op1
);
865 case OPC1_16_SRRS_ADDSC_A
:
866 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
867 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
868 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
869 temp
= tcg_temp_new();
870 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
871 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
875 case OPC1_16_SLRO_LD_A
:
876 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
877 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
878 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
880 case OPC1_16_SLRO_LD_BU
:
881 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
882 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
883 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
885 case OPC1_16_SLRO_LD_H
:
886 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
887 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
888 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
890 case OPC1_16_SLRO_LD_W
:
891 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
892 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
893 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
896 case OPC1_16_SB_CALL
:
900 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
901 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
904 case OPC1_16_SBC_JEQ
:
905 case OPC1_16_SBC_JNE
:
906 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
907 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
908 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
911 case OPC1_16_SBRN_JNZ_T
:
912 case OPC1_16_SBRN_JZ_T
:
913 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
914 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
915 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
918 case OPC1_16_SBR_JEQ
:
919 case OPC1_16_SBR_JGEZ
:
920 case OPC1_16_SBR_JGTZ
:
921 case OPC1_16_SBR_JLEZ
:
922 case OPC1_16_SBR_JLTZ
:
923 case OPC1_16_SBR_JNE
:
924 case OPC1_16_SBR_JNZ
:
925 case OPC1_16_SBR_JNZ_A
:
927 case OPC1_16_SBR_JZ_A
:
928 case OPC1_16_SBR_LOOP
:
929 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
930 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
931 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
935 case OPC1_16_SC_BISR
:
936 case OPC1_16_SC_LD_A
:
937 case OPC1_16_SC_LD_W
:
940 case OPC1_16_SC_ST_A
:
941 case OPC1_16_SC_ST_W
:
942 case OPC1_16_SC_SUB_A
:
943 decode_sc_opc(ctx
, op1
);
946 case OPC1_16_SLR_LD_A
:
947 case OPC1_16_SLR_LD_A_POSTINC
:
948 case OPC1_16_SLR_LD_BU
:
949 case OPC1_16_SLR_LD_BU_POSTINC
:
950 case OPC1_16_SLR_LD_H
:
951 case OPC1_16_SLR_LD_H_POSTINC
:
952 case OPC1_16_SLR_LD_W
:
953 case OPC1_16_SLR_LD_W_POSTINC
:
954 decode_slr_opc(ctx
, op1
);
957 case OPC1_16_SRO_LD_A
:
958 case OPC1_16_SRO_LD_BU
:
959 case OPC1_16_SRO_LD_H
:
960 case OPC1_16_SRO_LD_W
:
961 case OPC1_16_SRO_ST_A
:
962 case OPC1_16_SRO_ST_B
:
963 case OPC1_16_SRO_ST_H
:
964 case OPC1_16_SRO_ST_W
:
965 decode_sro_opc(ctx
, op1
);
968 case OPC1_16_SSRO_ST_A
:
969 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
970 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
971 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
973 case OPC1_16_SSRO_ST_B
:
974 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
975 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
976 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
978 case OPC1_16_SSRO_ST_H
:
979 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
980 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
981 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
983 case OPC1_16_SSRO_ST_W
:
984 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
985 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
986 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
991 static void decode_32Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
995 static void decode_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int *is_branch
)
997 /* 16-Bit Instruction */
998 if ((ctx
->opcode
& 0x1) == 0) {
999 ctx
->next_pc
= ctx
->pc
+ 2;
1000 decode_16Bit_opc(env
, ctx
);
1001 /* 32-Bit Instruction */
1003 ctx
->next_pc
= ctx
->pc
+ 4;
1004 decode_32Bit_opc(env
, ctx
);
1009 gen_intermediate_code_internal(TriCoreCPU
*cpu
, struct TranslationBlock
*tb
,
1012 CPUState
*cs
= CPU(cpu
);
1013 CPUTriCoreState
*env
= &cpu
->env
;
1015 target_ulong pc_start
;
1017 uint16_t *gen_opc_end
;
1020 qemu_log("search pc %d\n", search_pc
);
1025 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
1029 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
1030 ctx
.bstate
= BS_NONE
;
1031 ctx
.mem_idx
= cpu_mmu_index(env
);
1033 tcg_clear_temp_count();
1035 while (ctx
.bstate
== BS_NONE
) {
1036 ctx
.opcode
= cpu_ldl_code(env
, ctx
.pc
);
1037 decode_opc(env
, &ctx
, 0);
1041 if (tcg_ctx
.gen_opc_ptr
>= gen_opc_end
) {
1042 gen_save_pc(ctx
.next_pc
);
1047 gen_save_pc(ctx
.next_pc
);
1051 ctx
.pc
= ctx
.next_pc
;
1054 gen_tb_end(tb
, num_insns
);
1055 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
1057 printf("done_generating search pc\n");
1059 tb
->size
= ctx
.pc
- pc_start
;
1060 tb
->icount
= num_insns
;
1062 if (tcg_check_temp_count()) {
1063 printf("LEAK at %08x\n", env
->PC
);
1067 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1068 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
1069 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
1076 gen_intermediate_code(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
1078 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, false);
1082 gen_intermediate_code_pc(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
1084 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, true);
1088 restore_state_to_opc(CPUTriCoreState
*env
, TranslationBlock
*tb
, int pc_pos
)
1090 env
->PC
= tcg_ctx
.gen_opc_pc
[pc_pos
];
1098 void cpu_state_reset(CPUTriCoreState
*env
)
1100 /* Reset Regs to Default Value */
1104 static void tricore_tcg_init_csfr(void)
1106 cpu_PCXI
= tcg_global_mem_new(TCG_AREG0
,
1107 offsetof(CPUTriCoreState
, PCXI
), "PCXI");
1108 cpu_PSW
= tcg_global_mem_new(TCG_AREG0
,
1109 offsetof(CPUTriCoreState
, PSW
), "PSW");
1110 cpu_PC
= tcg_global_mem_new(TCG_AREG0
,
1111 offsetof(CPUTriCoreState
, PC
), "PC");
1112 cpu_ICR
= tcg_global_mem_new(TCG_AREG0
,
1113 offsetof(CPUTriCoreState
, ICR
), "ICR");
1116 void tricore_tcg_init(void)
1123 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
1125 for (i
= 0 ; i
< 16 ; i
++) {
1126 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
1127 offsetof(CPUTriCoreState
, gpr_a
[i
]),
1130 for (i
= 0 ; i
< 16 ; i
++) {
1131 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
1132 offsetof(CPUTriCoreState
, gpr_d
[i
]),
1135 tricore_tcg_init_csfr();
1136 /* init PSW flag cache */
1137 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
1138 offsetof(CPUTriCoreState
, PSW_USB_C
),
1140 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
1141 offsetof(CPUTriCoreState
, PSW_USB_V
),
1143 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
1144 offsetof(CPUTriCoreState
, PSW_USB_SV
),
1146 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
1147 offsetof(CPUTriCoreState
, PSW_USB_AV
),
1149 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
1150 offsetof(CPUTriCoreState
, PSW_USB_SAV
),