target-tricore: Add instructions of SRRS and SLRO opcode format
[qemu.git] / target-tricore / translate.c
/*
 * TriCore emulation for qemu: main translation routines.
 *
 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "tcg-op.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
/*
 * TCG registers
 */
static TCGv cpu_PC;
static TCGv cpu_PCXI;
static TCGv cpu_PSW;
static TCGv cpu_ICR;
/* GPR registers */
static TCGv cpu_gpr_a[16];
static TCGv cpu_gpr_d[16];
/* PSW Flag cache */
static TCGv cpu_PSW_C;
static TCGv cpu_PSW_V;
static TCGv cpu_PSW_SV;
static TCGv cpu_PSW_AV;
static TCGv cpu_PSW_SAV;
/* CPU env */
static TCGv_ptr cpu_env;

#include "exec/gen-icount.h"
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp"  , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };

static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc, saved_pc, next_pc;
    uint32_t opcode;
    int singlestep_enabled;
    /* Routine used to access memory */
    int mem_idx;
    uint32_t hflags, saved_hflags;
    int bstate;
} DisasContext;
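
/*
 * Exit conditions for the translation loop below. The meanings are an
 * assumption based on the convention used by other QEMU targets, not stated
 * in this file: BS_NONE keeps translating, BS_STOP ends the TB after the
 * current instruction, BS_BRANCH means a branch has already been emitted,
 * and BS_EXCP means an exception was raised.
 */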
enum {
    BS_NONE   = 0,
    BS_STOP   = 1,
    BS_BRANCH = 2,
    BS_EXCP   = 3,
};
void tricore_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    TriCoreCPU *cpu = TRICORE_CPU(cs);
    CPUTriCoreState *env = &cpu->env;
    int i;

    cpu_fprintf(f, "PC=%08x\n", env->PC);
    for (i = 0; i < 16; ++i) {
        if ((i & 3) == 0) {
            cpu_fprintf(f, "GPR A%02d:", i);
        }
        cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames_a[i], env->gpr_a[i]);
    }
    for (i = 0; i < 16; ++i) {
        if ((i & 3) == 0) {
            cpu_fprintf(f, "GPR D%02d:", i);
        }
        cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames_d[i], env->gpr_d[i]);
    }
}
/*
 * Functions to generate micro-ops
 */

/* Functions for load/save to/from memory */
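
/*
 * gen_offset_ld/gen_offset_st compute the effective address (base register
 * plus a sign-extended constant) in a scratch TCG temporary, so the base
 * register itself is never modified by the access.
 */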
static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, con);
    tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
    tcg_temp_free(temp);
}

static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, con);
    tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
    tcg_temp_free(temp);
}
/* Functions for arithmetic instructions */
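
/*
 * The generators below keep the cached PSW status flags with the flag value
 * in bit 31 of the corresponding TCG global: V is computed so that bit 31
 * holds the signed overflow, AV is result[31] ^ result[30] (computed as
 * result ^ (result << 1)), and SV/SAV accumulate sticky copies by OR-ing in
 * V/AV.
 */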
static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    /* Addition and set V/SV bits */
    tcg_gen_add_tl(result, r1, r2);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* Calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(t0);
}
static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
{
    TCGv temp = tcg_const_i32(r2);
    gen_add_d(ret, r1, temp);
    tcg_temp_free(temp);
}
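
/*
 * Conditional add used by CADD/CADDN: the result and the V/AV flags are only
 * updated when cond(r4, 0) holds (movcond keeps the previous values
 * otherwise), and the bit-31 mask makes the sticky SV/SAV updates
 * conditional as well.
 */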
static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_add_tl(result, r1, r2);
    /* Calc PSW_V */
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_andc_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    /* Set PSW_SV */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    /* calc AV bit */
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r3);

    tcg_temp_free(t0);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(result);
    tcg_temp_free(mask);
}

static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
                                 TCGv r3, TCGv r4)
{
    TCGv temp = tcg_const_i32(r2);
    gen_cond_add(cond, r1, temp, r3, r4);
    tcg_temp_free(temp);
}
static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free(result);
}
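
/*
 * 32x32 -> 32 signed multiply: overflow (V) is detected by checking whether
 * the high half of the 64-bit product differs from the sign extension of
 * the low half.
 */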
static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv high = tcg_temp_new();
    TCGv low = tcg_temp_new();

    tcg_gen_muls2_tl(low, high, r1, r2);
    tcg_gen_mov_tl(ret, low);
    /* calc V bit */
    tcg_gen_sari_tl(low, low, 31);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(high);
    tcg_temp_free(low);
}
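
/*
 * SH uses a signed shift count: non-negative counts shift left, negative
 * counts shift right logically by -count. A count of -32 is special-cased
 * because TCG shifts are only defined for counts 0..31.
 */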
static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
{
    if (shift_count == -32) {
        tcg_gen_movi_tl(ret, 0);
    } else if (shift_count >= 0) {
        tcg_gen_shli_tl(ret, r1, shift_count);
    } else {
        tcg_gen_shri_tl(ret, r1, -shift_count);
    }
}
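
/*
 * SHA with an immediate count: positive counts shift left and flag V/SV when
 * the operand lies outside the range that survives the shift without
 * overflow; negative counts perform an arithmetic right shift. In both
 * directions the bits shifted out are collected in cpu_PSW_C.
 */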
static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
{
    uint32_t msk, msk_start;
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv t_0 = tcg_const_i32(0);

    if (shift_count == 0) {
        /* Clear PSW.C and PSW.V */
        tcg_gen_movi_tl(cpu_PSW_C, 0);
        tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count == -32) {
        /* set PSW.C */
        tcg_gen_mov_tl(cpu_PSW_C, r1);
        /* fill ret completely with sign bit */
        tcg_gen_sari_tl(ret, r1, 31);
        /* clear PSW.V */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else if (shift_count > 0) {
        TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
        TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);

        /* calc carry */
        msk_start = 32 - shift_count;
        msk = ((1 << shift_count) - 1) << msk_start;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        /* calc v/sv bits */
        tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
        tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
        tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc sv */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
        /* do shift */
        tcg_gen_shli_tl(ret, r1, shift_count);

        tcg_temp_free(t_max);
        tcg_temp_free(t_min);
    } else {
        /* clear PSW.V */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* calc carry */
        msk = (1 << -shift_count) - 1;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        /* do shift */
        tcg_gen_sari_tl(ret, r1, -shift_count);
    }
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(t_0);
}
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_add_ssov(ret, cpu_env, r1, r2);
}

static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_ssov(ret, cpu_env, r1, r2);
}
/*
 * Functions for decoding instructions
 */
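
/*
 * 16-bit instruction formats handled here: SRC (one register plus a 4-bit
 * sign-extended constant), SRR (two registers), SSR (store a register
 * through an address register), SRRS (two registers plus a 2-bit shift
 * amount) and SLRO (load relative to the implicit address register A[15]).
 * D[15] and A[15] serve as implicit operands in several of these encodings.
 */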
static void decode_src_opc(DisasContext *ctx, int op1)
{
    int r1;
    int32_t const4;
    TCGv temp, temp2;

    r1 = MASK_OP_SRC_S1D(ctx->opcode);
    const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode);

    switch (op1) {
    case OPC1_16_SRC_ADD:
        gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
        break;
    case OPC1_16_SRC_ADD_A15:
        gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[15], const4);
        break;
    case OPC1_16_SRC_ADD_15A:
        gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4);
        break;
    case OPC1_16_SRC_ADD_A:
        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4);
        break;
    case OPC1_16_SRC_CADD:
        gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
                      cpu_gpr_d[15]);
        break;
    case OPC1_16_SRC_CADDN:
        gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
                      cpu_gpr_d[15]);
        break;
    case OPC1_16_SRC_CMOV:
        temp = tcg_const_tl(0);
        temp2 = tcg_const_tl(const4);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           temp2, cpu_gpr_d[r1]);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
    case OPC1_16_SRC_CMOVN:
        temp = tcg_const_tl(0);
        temp2 = tcg_const_tl(const4);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           temp2, cpu_gpr_d[r1]);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
    case OPC1_16_SRC_EQ:
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
                            const4);
        break;
    case OPC1_16_SRC_LT:
        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
                            const4);
        break;
    case OPC1_16_SRC_MOV:
        tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
        break;
    case OPC1_16_SRC_MOV_A:
        const4 = MASK_OP_SRC_CONST4(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_a[r1], const4);
        break;
    case OPC1_16_SRC_SH:
        gen_shi(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
        break;
    case OPC1_16_SRC_SHA:
        gen_shaci(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
        break;
    }
}
static void decode_srr_opc(DisasContext *ctx, int op1)
{
    int r1, r2;
    TCGv temp;

    r1 = MASK_OP_SRR_S1D(ctx->opcode);
    r2 = MASK_OP_SRR_S2(ctx->opcode);

    switch (op1) {
    case OPC1_16_SRR_ADD:
        gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_ADD_A15:
        gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_ADD_15A:
        gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_ADD_A:
        tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
        break;
    case OPC1_16_SRR_ADDS:
        gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_AND:
        tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_CMOV:
        temp = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           cpu_gpr_d[r2], cpu_gpr_d[r1]);
        tcg_temp_free(temp);
        break;
    case OPC1_16_SRR_CMOVN:
        temp = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           cpu_gpr_d[r2], cpu_gpr_d[r1]);
        tcg_temp_free(temp);
        break;
    case OPC1_16_SRR_EQ:
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_LT:
        tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_MOV:
        tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_MOV_A:
        tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_MOV_AA:
        tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]);
        break;
    case OPC1_16_SRR_MOV_D:
        tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]);
        break;
    case OPC1_16_SRR_MUL:
        gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_OR:
        tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_SUB:
        gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_SUB_A15B:
        gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_SUB_15AB:
        gen_sub_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_SUBS:
        gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_XOR:
        tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    }
}
static void decode_ssr_opc(DisasContext *ctx, int op1)
{
    int r1, r2;

    r1 = MASK_OP_SSR_S1(ctx->opcode);
    r2 = MASK_OP_SSR_S2(ctx->opcode);

    switch (op1) {
    case OPC1_16_SSR_ST_A:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
        break;
    case OPC1_16_SSR_ST_A_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
        break;
    case OPC1_16_SSR_ST_B:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
        break;
    case OPC1_16_SSR_ST_B_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
        break;
    case OPC1_16_SSR_ST_H:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
        break;
    case OPC1_16_SSR_ST_H_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
        break;
    case OPC1_16_SSR_ST_W:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
        break;
    case OPC1_16_SSR_ST_W_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
        break;
    }
}
static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
{
    int op1;
    int r1, r2;
    int32_t const16;
    TCGv temp;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle ADDSC.A opcode only being 6 bit long */
    if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) {
        op1 = OPC1_16_SRRS_ADDSC_A;
    }

    switch (op1) {
    case OPC1_16_SRC_ADD:
    case OPC1_16_SRC_ADD_A15:
    case OPC1_16_SRC_ADD_15A:
    case OPC1_16_SRC_ADD_A:
    case OPC1_16_SRC_CADD:
    case OPC1_16_SRC_CADDN:
    case OPC1_16_SRC_CMOV:
    case OPC1_16_SRC_CMOVN:
    case OPC1_16_SRC_EQ:
    case OPC1_16_SRC_LT:
    case OPC1_16_SRC_MOV:
    case OPC1_16_SRC_MOV_A:
    case OPC1_16_SRC_SH:
    case OPC1_16_SRC_SHA:
        decode_src_opc(ctx, op1);
        break;
    /* SRR-format */
    case OPC1_16_SRR_ADD:
    case OPC1_16_SRR_ADD_A15:
    case OPC1_16_SRR_ADD_15A:
    case OPC1_16_SRR_ADD_A:
    case OPC1_16_SRR_ADDS:
    case OPC1_16_SRR_AND:
    case OPC1_16_SRR_CMOV:
    case OPC1_16_SRR_CMOVN:
    case OPC1_16_SRR_EQ:
    case OPC1_16_SRR_LT:
    case OPC1_16_SRR_MOV:
    case OPC1_16_SRR_MOV_A:
    case OPC1_16_SRR_MOV_AA:
    case OPC1_16_SRR_MOV_D:
    case OPC1_16_SRR_MUL:
    case OPC1_16_SRR_OR:
    case OPC1_16_SRR_SUB:
    case OPC1_16_SRR_SUB_A15B:
    case OPC1_16_SRR_SUB_15AB:
    case OPC1_16_SRR_SUBS:
    case OPC1_16_SRR_XOR:
        decode_srr_opc(ctx, op1);
        break;
    /* SSR-format */
    case OPC1_16_SSR_ST_A:
    case OPC1_16_SSR_ST_A_POSTINC:
    case OPC1_16_SSR_ST_B:
    case OPC1_16_SSR_ST_B_POSTINC:
    case OPC1_16_SSR_ST_H:
    case OPC1_16_SSR_ST_H_POSTINC:
    case OPC1_16_SSR_ST_W:
    case OPC1_16_SSR_ST_W_POSTINC:
        decode_ssr_opc(ctx, op1);
        break;
    /* SRRS-format */
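    /*
     * ADDSC.A: effective address arithmetic, A[a] = A[b] + (D[15] << n),
     * where n comes from the 2-bit field of the SRRS format.
     */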
    case OPC1_16_SRRS_ADDSC_A:
        r2 = MASK_OP_SRRS_S2(ctx->opcode);
        r1 = MASK_OP_SRRS_S1D(ctx->opcode);
        const16 = MASK_OP_SRRS_N(ctx->opcode);
        temp = tcg_temp_new();
        tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16);
        tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
    /* SLRO-format */
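    /*
     * SLRO loads are A[15]-relative; the 4-bit offset is scaled by the
     * access width (4 for word and LD.A, 2 for half-word, 1 for byte)
     * before being added to A[15].
     */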
    case OPC1_16_SLRO_LD_A:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
        break;
    case OPC1_16_SLRO_LD_BU:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
        break;
    case OPC1_16_SLRO_LD_H:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
        break;
    case OPC1_16_SLRO_LD_W:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
        break;
    }
}
static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
{
}
static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
{
    /* 16-Bit Instruction */
    if ((ctx->opcode & 0x1) == 0) {
        ctx->next_pc = ctx->pc + 2;
        decode_16Bit_opc(env, ctx);
    /* 32-Bit Instruction */
    } else {
        ctx->next_pc = ctx->pc + 4;
        decode_32Bit_opc(env, ctx);
    }
}
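
/*
 * Main translation loop: instructions are decoded sequentially until the
 * decoder requests an exit (ctx.bstate != BS_NONE), the TCG opcode buffer
 * fills up, or single-stepping is enabled.
 */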
static inline void
gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
                               int search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUTriCoreState *env = &cpu->env;
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;
    uint16_t *gen_opc_end;

    if (search_pc) {
        qemu_log("search pc %d\n", search_pc);
    }

    num_insns = 0;
    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.saved_pc = -1;
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.bstate = BS_NONE;
    ctx.mem_idx = cpu_mmu_index(env);

    tcg_clear_temp_count();
    gen_tb_start();
    while (ctx.bstate == BS_NONE) {
        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx, 0);

        num_insns++;

        if (tcg_ctx.gen_opc_ptr >= gen_opc_end) {
            break;
        }
        if (singlestep) {
            break;
        }
        ctx.pc = ctx.next_pc;
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        printf("done_generating search pc\n");
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
    if (tcg_check_temp_count()) {
        printf("LEAK at %08x\n", env->PC);
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
void
gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
}

void
gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
}

void
restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos)
{
    env->PC = tcg_ctx.gen_opc_pc[pc_pos];
}
/*
 * Initialization
 */

void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
    env->PSW = 0xb80;
}
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
                                  offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, ICR), "ICR");
}
void tricore_tcg_init(void)
{
    int i;
    static int inited;
    if (inited) {
        return;
    }
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    /* reg init */
    for (i = 0 ; i < 16 ; i++) {
        cpu_gpr_a[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUTriCoreState, gpr_a[i]),
                                          regnames_a[i]);
    }
    for (i = 0 ; i < 16 ; i++) {
        cpu_gpr_d[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUTriCoreState, gpr_d[i]),
                                          regnames_d[i]);
    }
    tricore_tcg_init_csfr();
    /* init PSW flag cache */
    cpu_PSW_C = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUTriCoreState, PSW_USB_C),
                                   "PSW_C");
    cpu_PSW_V = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUTriCoreState, PSW_USB_V),
                                   "PSW_V");
    cpu_PSW_SV = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUTriCoreState, PSW_USB_SV),
                                    "PSW_SV");
    cpu_PSW_AV = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUTriCoreState, PSW_USB_AV),
                                    "PSW_AV");
    cpu_PSW_SAV = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUTriCoreState, PSW_USB_SAV),
                                     "PSW_SAV");
}