2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
/*
 * TCG global variables caching TriCore CPU state.  They are registered in
 * tricore_tcg_init() and are backed by fields of CPUTriCoreState.
 */
static TCGv cpu_gpr_a[16];      /* address registers a0..a15 (index 10 = sp) */
static TCGv cpu_gpr_d[16];      /* data registers d0..d15 */
/* PSW flag cache: user status bits kept as separate TCG globals */
static TCGv cpu_PSW_C;          /* carry */
static TCGv cpu_PSW_V;          /* overflow */
static TCGv cpu_PSW_SV;         /* sticky overflow */
static TCGv cpu_PSW_AV;         /* advance overflow */
static TCGv cpu_PSW_SAV;        /* sticky advance overflow */

static TCGv_ptr cpu_env;        /* pointer to CPUTriCoreState for helpers */
50 #include "exec/gen-icount.h"
/* Printable names of the address registers (index 10 is the stack pointer). */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };
/* Printable names of the data registers. */
static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
/*
 * Per-translation-block decoder state.
 * NOTE(review): this view of the struct has extraction gaps — code elsewhere
 * in this file references ctx->opcode, ctx->mem_idx and ctx.bstate, so the
 * full definition must carry at least those fields as well; confirm against
 * the complete source.
 */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    /* current, previously emitted, and next instruction PC */
    target_ulong pc, saved_pc, next_pc;
    int singlestep_enabled;
    /* Routine used to access memory */
    uint32_t hflags, saved_hflags;
} DisasContext;
83 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
84 fprintf_function cpu_fprintf
, int flags
)
86 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
87 CPUTriCoreState
*env
= &cpu
->env
;
90 cpu_fprintf(f
, "PC=%08x\n", env
->PC
);
91 for (i
= 0; i
< 16; ++i
) {
93 cpu_fprintf(f
, "GPR A%02d:", i
);
95 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_a
[i
], env
->gpr_a
[i
]);
97 for (i
= 0; i
< 16; ++i
) {
99 cpu_fprintf(f
, "GPR D%02d:", i
);
101 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_d
[i
], env
->gpr_d
[i
]);
107 * Functions to generate micro-ops
110 /* Functions for load/save to/from memory */
112 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
113 int16_t con
, TCGMemOp mop
)
115 TCGv temp
= tcg_temp_new();
116 tcg_gen_addi_tl(temp
, r2
, con
);
117 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
121 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
122 int16_t con
, TCGMemOp mop
)
124 TCGv temp
= tcg_temp_new();
125 tcg_gen_addi_tl(temp
, r2
, con
);
126 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
130 /* Functions for arithmetic instructions */
132 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
134 TCGv t0
= tcg_temp_new_i32();
135 TCGv result
= tcg_temp_new_i32();
136 /* Addition and set V/SV bits */
137 tcg_gen_add_tl(result
, r1
, r2
);
139 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
140 tcg_gen_xor_tl(t0
, r1
, r2
);
141 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
143 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
144 /* Calc AV/SAV bits */
145 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
146 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
148 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
149 /* write back result */
150 tcg_gen_mov_tl(ret
, result
);
152 tcg_temp_free(result
);
156 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
158 TCGv temp
= tcg_const_i32(r2
);
159 gen_add_d(ret
, r1
, temp
);
163 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
166 TCGv temp
= tcg_temp_new();
167 TCGv temp2
= tcg_temp_new();
168 TCGv result
= tcg_temp_new();
169 TCGv mask
= tcg_temp_new();
170 TCGv t0
= tcg_const_i32(0);
172 /* create mask for sticky bits */
173 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
174 tcg_gen_shli_tl(mask
, mask
, 31);
176 tcg_gen_add_tl(result
, r1
, r2
);
178 tcg_gen_xor_tl(temp
, result
, r1
);
179 tcg_gen_xor_tl(temp2
, r1
, r2
);
180 tcg_gen_andc_tl(temp
, temp
, temp2
);
181 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
183 tcg_gen_and_tl(temp
, temp
, mask
);
184 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
186 tcg_gen_add_tl(temp
, result
, result
);
187 tcg_gen_xor_tl(temp
, temp
, result
);
188 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
190 tcg_gen_and_tl(temp
, temp
, mask
);
191 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
192 /* write back result */
193 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r3
);
197 tcg_temp_free(temp2
);
198 tcg_temp_free(result
);
202 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
205 TCGv temp
= tcg_const_i32(r2
);
206 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
210 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
212 TCGv temp
= tcg_temp_new_i32();
213 TCGv result
= tcg_temp_new_i32();
215 tcg_gen_sub_tl(result
, r1
, r2
);
217 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
218 tcg_gen_xor_tl(temp
, r1
, r2
);
219 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
221 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
223 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
224 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
226 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
227 /* write back result */
228 tcg_gen_mov_tl(ret
, result
);
231 tcg_temp_free(result
);
234 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
236 TCGv high
= tcg_temp_new();
237 TCGv low
= tcg_temp_new();
239 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
240 tcg_gen_mov_tl(ret
, low
);
242 tcg_gen_sari_tl(low
, low
, 31);
243 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
244 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
246 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
248 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
249 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
251 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
/*
 * ret = r1 shifted by the immediate shift_count: positive counts shift
 * left, negative counts shift right logically; -32 clears ret entirely
 * (a 32-bit logical right shift would be out of range for TCG).
 */
static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
{
    if (shift_count == -32) {
        tcg_gen_movi_tl(ret, 0);
    } else if (shift_count >= 0) {
        tcg_gen_shli_tl(ret, r1, shift_count);
    } else {
        tcg_gen_shri_tl(ret, r1, -shift_count);
    }
}
268 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
270 uint32_t msk
, msk_start
;
271 TCGv temp
= tcg_temp_new();
272 TCGv temp2
= tcg_temp_new();
273 TCGv t_0
= tcg_const_i32(0);
275 if (shift_count
== 0) {
276 /* Clear PSW.C and PSW.V */
277 tcg_gen_movi_tl(cpu_PSW_C
, 0);
278 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
279 tcg_gen_mov_tl(ret
, r1
);
280 } else if (shift_count
== -32) {
282 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
283 /* fill ret completly with sign bit */
284 tcg_gen_sari_tl(ret
, r1
, 31);
286 tcg_gen_movi_tl(cpu_PSW_V
, 0);
287 } else if (shift_count
> 0) {
288 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
289 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
292 msk_start
= 32 - shift_count
;
293 msk
= ((1 << shift_count
) - 1) << msk_start
;
294 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
296 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
297 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
298 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
299 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
301 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
303 tcg_gen_shli_tl(ret
, r1
, shift_count
);
305 tcg_temp_free(t_max
);
306 tcg_temp_free(t_min
);
309 tcg_gen_movi_tl(cpu_PSW_V
, 0);
311 msk
= (1 << -shift_count
) - 1;
312 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
314 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
316 /* calc av overflow bit */
317 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
318 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
319 /* calc sav overflow bit */
320 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
323 tcg_temp_free(temp2
);
/* ret = saturating signed r1 + r2 via helper; the helper updates the
 * PSW flags through cpu_env. */
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_add_ssov(ret, cpu_env, r1, r2);
}
/* ret = saturating signed r1 - r2 via helper; the helper updates the
 * PSW flags through cpu_env. */
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_ssov(ret, cpu_env, r1, r2);
}
338 * Functions for decoding instructions
341 static void decode_src_opc(DisasContext
*ctx
, int op1
)
347 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
348 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
351 case OPC1_16_SRC_ADD
:
352 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
354 case OPC1_16_SRC_ADD_A15
:
355 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
357 case OPC1_16_SRC_ADD_15A
:
358 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
360 case OPC1_16_SRC_ADD_A
:
361 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
363 case OPC1_16_SRC_CADD
:
364 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
367 case OPC1_16_SRC_CADDN
:
368 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
371 case OPC1_16_SRC_CMOV
:
372 temp
= tcg_const_tl(0);
373 temp2
= tcg_const_tl(const4
);
374 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
375 temp2
, cpu_gpr_d
[r1
]);
377 tcg_temp_free(temp2
);
379 case OPC1_16_SRC_CMOVN
:
380 temp
= tcg_const_tl(0);
381 temp2
= tcg_const_tl(const4
);
382 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
383 temp2
, cpu_gpr_d
[r1
]);
385 tcg_temp_free(temp2
);
388 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
392 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
395 case OPC1_16_SRC_MOV
:
396 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
398 case OPC1_16_SRC_MOV_A
:
399 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
400 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
403 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
405 case OPC1_16_SRC_SHA
:
406 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
411 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
416 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
417 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
420 case OPC1_16_SRR_ADD
:
421 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
423 case OPC1_16_SRR_ADD_A15
:
424 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
426 case OPC1_16_SRR_ADD_15A
:
427 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
429 case OPC1_16_SRR_ADD_A
:
430 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
432 case OPC1_16_SRR_ADDS
:
433 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
435 case OPC1_16_SRR_AND
:
436 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
438 case OPC1_16_SRR_CMOV
:
439 temp
= tcg_const_tl(0);
440 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
441 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
444 case OPC1_16_SRR_CMOVN
:
445 temp
= tcg_const_tl(0);
446 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
447 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
451 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
455 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
458 case OPC1_16_SRR_MOV
:
459 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
461 case OPC1_16_SRR_MOV_A
:
462 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
464 case OPC1_16_SRR_MOV_AA
:
465 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
467 case OPC1_16_SRR_MOV_D
:
468 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
470 case OPC1_16_SRR_MUL
:
471 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
474 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
476 case OPC1_16_SRR_SUB
:
477 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
479 case OPC1_16_SRR_SUB_A15B
:
480 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
482 case OPC1_16_SRR_SUB_15AB
:
483 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
485 case OPC1_16_SRR_SUBS
:
486 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
488 case OPC1_16_SRR_XOR
:
489 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
494 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
498 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
499 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
502 case OPC1_16_SSR_ST_A
:
503 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
505 case OPC1_16_SSR_ST_A_POSTINC
:
506 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
507 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
509 case OPC1_16_SSR_ST_B
:
510 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
512 case OPC1_16_SSR_ST_B_POSTINC
:
513 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
514 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
516 case OPC1_16_SSR_ST_H
:
517 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
519 case OPC1_16_SSR_ST_H_POSTINC
:
520 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
521 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
523 case OPC1_16_SSR_ST_W
:
524 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
526 case OPC1_16_SSR_ST_W_POSTINC
:
527 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
528 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
533 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
540 op1
= MASK_OP_MAJOR(ctx
->opcode
);
542 /* handle ADDSC.A opcode only being 6 bit long */
543 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
544 op1
= OPC1_16_SRRS_ADDSC_A
;
548 case OPC1_16_SRC_ADD
:
549 case OPC1_16_SRC_ADD_A15
:
550 case OPC1_16_SRC_ADD_15A
:
551 case OPC1_16_SRC_ADD_A
:
552 case OPC1_16_SRC_CADD
:
553 case OPC1_16_SRC_CADDN
:
554 case OPC1_16_SRC_CMOV
:
555 case OPC1_16_SRC_CMOVN
:
558 case OPC1_16_SRC_MOV
:
559 case OPC1_16_SRC_MOV_A
:
561 case OPC1_16_SRC_SHA
:
562 decode_src_opc(ctx
, op1
);
565 case OPC1_16_SRR_ADD
:
566 case OPC1_16_SRR_ADD_A15
:
567 case OPC1_16_SRR_ADD_15A
:
568 case OPC1_16_SRR_ADD_A
:
569 case OPC1_16_SRR_ADDS
:
570 case OPC1_16_SRR_AND
:
571 case OPC1_16_SRR_CMOV
:
572 case OPC1_16_SRR_CMOVN
:
575 case OPC1_16_SRR_MOV
:
576 case OPC1_16_SRR_MOV_A
:
577 case OPC1_16_SRR_MOV_AA
:
578 case OPC1_16_SRR_MOV_D
:
579 case OPC1_16_SRR_MUL
:
581 case OPC1_16_SRR_SUB
:
582 case OPC1_16_SRR_SUB_A15B
:
583 case OPC1_16_SRR_SUB_15AB
:
584 case OPC1_16_SRR_SUBS
:
585 case OPC1_16_SRR_XOR
:
586 decode_srr_opc(ctx
, op1
);
589 case OPC1_16_SSR_ST_A
:
590 case OPC1_16_SSR_ST_A_POSTINC
:
591 case OPC1_16_SSR_ST_B
:
592 case OPC1_16_SSR_ST_B_POSTINC
:
593 case OPC1_16_SSR_ST_H
:
594 case OPC1_16_SSR_ST_H_POSTINC
:
595 case OPC1_16_SSR_ST_W
:
596 case OPC1_16_SSR_ST_W_POSTINC
:
597 decode_ssr_opc(ctx
, op1
);
600 case OPC1_16_SRRS_ADDSC_A
:
601 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
602 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
603 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
604 temp
= tcg_temp_new();
605 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
606 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
610 case OPC1_16_SLRO_LD_A
:
611 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
612 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
613 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
615 case OPC1_16_SLRO_LD_BU
:
616 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
617 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
618 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
620 case OPC1_16_SLRO_LD_H
:
621 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
622 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
623 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
625 case OPC1_16_SLRO_LD_W
:
626 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
627 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
628 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
/*
 * Decode a 32-bit instruction.
 * NOTE(review): the body is not present in this view — it appears to be an
 * empty stub at this stage of development; confirm against the full source.
 */
static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
{
}
/*
 * Dispatch one guest instruction: bit 0 of the primary opcode byte selects
 * the encoding width (clear = 16-bit, set = 32-bit), and next_pc is advanced
 * accordingly.  is_branch is currently unused by the visible callees.
 */
static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
{
    /* 16-Bit Instruction */
    if ((ctx->opcode & 0x1) == 0) {
        ctx->next_pc = ctx->pc + 2;
        decode_16Bit_opc(env, ctx);
    /* 32-Bit Instruction */
    } else {
        ctx->next_pc = ctx->pc + 4;
        decode_32Bit_opc(env, ctx);
    }
}
651 gen_intermediate_code_internal(TriCoreCPU
*cpu
, struct TranslationBlock
*tb
,
654 CPUState
*cs
= CPU(cpu
);
655 CPUTriCoreState
*env
= &cpu
->env
;
657 target_ulong pc_start
;
659 uint16_t *gen_opc_end
;
662 qemu_log("search pc %d\n", search_pc
);
667 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
671 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
672 ctx
.bstate
= BS_NONE
;
673 ctx
.mem_idx
= cpu_mmu_index(env
);
675 tcg_clear_temp_count();
677 while (ctx
.bstate
== BS_NONE
) {
678 ctx
.opcode
= cpu_ldl_code(env
, ctx
.pc
);
679 decode_opc(env
, &ctx
, 0);
683 if (tcg_ctx
.gen_opc_ptr
>= gen_opc_end
) {
689 ctx
.pc
= ctx
.next_pc
;
692 gen_tb_end(tb
, num_insns
);
693 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
695 printf("done_generating search pc\n");
697 tb
->size
= ctx
.pc
- pc_start
;
698 tb
->icount
= num_insns
;
700 if (tcg_check_temp_count()) {
701 printf("LEAK at %08x\n", env
->PC
);
705 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
706 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
707 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
/* Public entry point: translate a TB without PC-search bookkeeping. */
void gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
}
/* Public entry point: translate a TB with PC-search bookkeeping enabled. */
void gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
}
/* Restore the guest PC recorded for the op at index pc_pos (used when an
 * exception interrupts a translated block). */
void restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb,
                          int pc_pos)
{
    env->PC = tcg_ctx.gen_opc_pc[pc_pos];
}
/*
 * Reset architectural registers to their power-on defaults.
 * NOTE(review): the body is not visible in this view (extraction gap) —
 * restore it from the full source; only the marker comment survives.
 */
void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
}
/* Register TCG globals for the core special function registers
 * (PCXI, PSW, PC, ICR), each backed by its CPUTriCoreState field. */
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
                                  offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, ICR), "ICR");
}
754 void tricore_tcg_init(void)
761 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
763 for (i
= 0 ; i
< 16 ; i
++) {
764 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
765 offsetof(CPUTriCoreState
, gpr_a
[i
]),
768 for (i
= 0 ; i
< 16 ; i
++) {
769 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
770 offsetof(CPUTriCoreState
, gpr_d
[i
]),
773 tricore_tcg_init_csfr();
774 /* init PSW flag cache */
775 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
776 offsetof(CPUTriCoreState
, PSW_USB_C
),
778 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
779 offsetof(CPUTriCoreState
, PSW_USB_V
),
781 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
782 offsetof(CPUTriCoreState
, PSW_USB_SV
),
784 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
785 offsetof(CPUTriCoreState
, PSW_USB_AV
),
787 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
788 offsetof(CPUTriCoreState
, PSW_USB_SAV
),