Merge remote-tracking branch 'remotes/riku/tags/pull-linux-user-20141006-2' into...
[qemu-kvm.git] / target-tricore / translate.c
blob4f654deb65cb23c596c342b76629121f8800cf4a
1 /*
2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "tcg-op.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
32 * TCG registers
/* Core special-function registers kept as TCG globals */
static TCGv cpu_PC;      /* program counter */
static TCGv cpu_PCXI;    /* previous context information */
static TCGv cpu_PSW;     /* architectural program status word */
static TCGv cpu_ICR;     /* interrupt control register */
/* GPR registers */
static TCGv cpu_gpr_a[16];   /* address registers a0..a15 (a10 = sp) */
static TCGv cpu_gpr_d[16];   /* data registers d0..d15 */
/* PSW Flag cache: status bits held in separate globals for cheap update;
   for V/SV/AV/SAV the flag value lives in bit 31 of the global */
static TCGv cpu_PSW_C;       /* carry */
static TCGv cpu_PSW_V;       /* overflow */
static TCGv cpu_PSW_SV;      /* sticky overflow */
static TCGv cpu_PSW_AV;      /* advance overflow */
static TCGv cpu_PSW_SAV;     /* sticky advance overflow */
/* CPU env */
static TCGv_ptr cpu_env;
50 #include "exec/gen-icount.h"
/* Printable names for the address registers; a10 is shown as "sp" */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };
/* Printable names for the data registers */
static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
/* Per-translation decoder state, threaded through all decode_* functions */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc, saved_pc, next_pc;  /* current / following insn address */
    uint32_t opcode;                     /* raw instruction word being decoded */
    int singlestep_enabled;
    /* Routine used to access memory */
    int mem_idx;                         /* MMU index for qemu_ld/st ops */
    uint32_t hflags, saved_hflags;
    int bstate;                          /* translation-loop state (BS_*) */
} DisasContext;
/* Translation-loop states stored in ctx->bstate; anything other than
   BS_NONE terminates the while loop in gen_intermediate_code_internal */
enum {
    BS_NONE = 0,    /* keep translating the next instruction */
    BS_STOP = 1,    /* stop translation */
    BS_BRANCH = 2,  /* a branch was emitted (set by gen_compute_branch) */
    BS_EXCP = 3,    /* an exception ends the block */
};
/* Dump PC and all 32 GPRs to f, four registers per line.  'flags' is
   accepted for the CPUState dump interface but unused here. */
void tricore_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    TriCoreCPU *cpu = TRICORE_CPU(cs);
    CPUTriCoreState *env = &cpu->env;
    int i;

    cpu_fprintf(f, "PC=%08x\n", env->PC);
    for (i = 0; i < 16; ++i) {
        if ((i & 3) == 0) {
            /* start a new row every four registers */
            cpu_fprintf(f, "GPR A%02d:", i);
        }
        cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames_a[i], env->gpr_a[i]);
    }
    for (i = 0; i < 16; ++i) {
        if ((i & 3) == 0) {
            cpu_fprintf(f, "GPR D%02d:", i);
        }
        cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames_d[i], env->gpr_d[i]);
    }
}
107 * Functions to generate micro-ops
110 /* Makros for generating helpers */
/* Makros for generating helpers */
/* Call helper 'name' with (env, arg) where arg becomes a constant i32;
   the temporary is freed afterwards */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)
118 /* Functions for load/save to/from memory */
120 static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
121 int16_t con, TCGMemOp mop)
123 TCGv temp = tcg_temp_new();
124 tcg_gen_addi_tl(temp, r2, con);
125 tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
126 tcg_temp_free(temp);
129 static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
130 int16_t con, TCGMemOp mop)
132 TCGv temp = tcg_temp_new();
133 tcg_gen_addi_tl(temp, r2, con);
134 tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
135 tcg_temp_free(temp);
138 /* Functions for arithmetic instructions */
/* ret = r1 + r2, updating PSW V/SV/AV/SAV.  A temporary holds the sum so
   that ret may alias r1 or r2. */
static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    /* Addition and set V/SV bits */
    tcg_gen_add_tl(result, r1, r2);
    /* calc V bit: set iff operands agree in sign but the result's sign
       differs; the flag ends up in bit 31 of cpu_PSW_V */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* Calc SV bit: sticky, accumulates V */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits: AV = result[31] ^ result[30], via (x+x) ^ x */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV: sticky, accumulates AV */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(t0);
}
164 static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
166 TCGv temp = tcg_const_i32(r2);
167 gen_add_d(ret, r1, temp);
168 tcg_temp_free(temp);
/* Conditional add: when cond(r4, 0) holds, r3 = r1 + r2 and the PSW flags
   are updated; otherwise r3 and the flags are left untouched.  Implemented
   branch-free with movcond plus a bit-31 mask gating the sticky bits. */
static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    /* create mask for sticky bits: 0x80000000 when cond holds, else 0 */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_add_tl(result, r1, r2);
    /* Calc PSW_V: same sign operands, different sign result */
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_andc_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    /* Set PSW_SV: masked so it only accumulates when cond holds */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    /* calc AV bit: result[31] ^ result[30] */
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    /* calc SAV bit, again gated by mask */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result only when the condition holds */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r3);

    tcg_temp_free(t0);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(result);
    tcg_temp_free(mask);
}
210 static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
211 TCGv r3, TCGv r4)
213 TCGv temp = tcg_const_i32(r2);
214 gen_cond_add(cond, r1, temp, r3, r4);
215 tcg_temp_free(temp);
/* ret = r1 - r2, updating PSW V/SV/AV/SAV.  A temporary holds the result
   so that ret may alias an input. */
static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    /* calc V bit: operands with different signs and result sign differing
       from r1 indicate signed overflow */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit (sticky) */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit: result[31] ^ result[30] */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit (sticky) */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free(result);
}
/* ret = low 32 bits of r1 * r2 (signed); V is set when the 64-bit product
   does not fit in 32 bits, i.e. the high word is not the sign extension of
   the low word. */
static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv high = tcg_temp_new();
    TCGv low = tcg_temp_new();

    tcg_gen_muls2_tl(low, high, r1, r2);
    tcg_gen_mov_tl(ret, low);
    /* calc V bit: compare high word against sign-extended low word */
    tcg_gen_sari_tl(low, low, 31);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit (sticky) */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV bit: ret[31] ^ ret[30] */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV bit (sticky) */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(high);
    tcg_temp_free(low);
}
265 static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
267 TCGv sat_neg = tcg_const_i32(low);
268 TCGv temp = tcg_const_i32(up);
270 /* sat_neg = (arg < low ) ? low : arg; */
271 tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);
273 /* ret = (sat_neg > up ) ? up : sat_neg; */
274 tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);
276 tcg_temp_free(sat_neg);
277 tcg_temp_free(temp);
280 static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
282 TCGv temp = tcg_const_i32(up);
283 /* sat_neg = (arg > up ) ? up : arg; */
284 tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
285 tcg_temp_free(temp);
288 static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
290 if (shift_count == -32) {
291 tcg_gen_movi_tl(ret, 0);
292 } else if (shift_count >= 0) {
293 tcg_gen_shli_tl(ret, r1, shift_count);
294 } else {
295 tcg_gen_shri_tl(ret, r1, -shift_count);
/* Arithmetic shift by constant with PSW flag computation.  Positive counts
   shift left (V/SV set when significant bits would be lost); negative
   counts shift right arithmetically.  PSW_C collects the bits shifted out
   (non-zero == carry); AV/SAV are computed from the final result. */
static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
{
    uint32_t msk, msk_start;
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    /* NOTE(review): t_0 appears unused in this function — candidate for
       removal; verify against the full file */
    TCGv t_0 = tcg_const_i32(0);

    if (shift_count == 0) {
        /* Clear PSW.C and PSW.V */
        tcg_gen_movi_tl(cpu_PSW_C, 0);
        tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count == -32) {
        /* set PSW.C: all bits are shifted out, so carry = whole register */
        tcg_gen_mov_tl(cpu_PSW_C, r1);
        /* fill ret completly with sign bit */
        tcg_gen_sari_tl(ret, r1, 31);
        /* clear PSW.V */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else if (shift_count > 0) {
        TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
        TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);

        /* calc carry: the top shift_count bits of r1 */
        msk_start = 32 - shift_count;
        msk = ((1 << shift_count) - 1) << msk_start;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        /* calc v/sv bits: overflow iff r1 lies outside [t_min, t_max] */
        tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
        tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
        tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc sv */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
        /* do shift */
        tcg_gen_shli_tl(ret, r1, shift_count);

        tcg_temp_free(t_max);
        tcg_temp_free(t_min);
    } else {
        /* clear PSW.V: an arithmetic right shift cannot overflow */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* calc carry: the low -shift_count bits of r1 */
        msk = (1 << -shift_count) - 1;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        /* do shift */
        tcg_gen_sari_tl(ret, r1, -shift_count);
    }
    /* calc av overflow bit: ret[31] ^ ret[30] */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit (sticky) */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(t_0);
}
/* ret = saturating (ssov) add of r1 and r2, computed in a helper */
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_add_ssov(ret, cpu_env, r1, r2);
}
/* ret = saturating (ssov) subtract of r2 from r1, computed in a helper */
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_ssov(ret, cpu_env, r1, r2);
}
368 /* helpers for generating program flow micro-ops */
/* Store a known program-counter value into the PC global */
static inline void gen_save_pc(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_PC, pc);
}
/* Jump to dest, chaining to TB slot n when dest stays on the same guest
   page and we are not single-stepping; otherwise fall back to a plain
   exit with PC updated. */
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = ctx->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
        likely(!ctx->singlestep_enabled)) {
        /* direct TB chaining is only valid within one page */
        tcg_gen_goto_tb(n);
        gen_save_pc(dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        gen_save_pc(dest);
        if (ctx->singlestep_enabled) {
            /* raise exception debug */
            /* NOTE(review): no debug exception is generated here yet */
        }
        tcg_gen_exit_tb(0);
    }
}
/* Emit a conditional branch: if cond(r1, r2) jump to pc + 2 * address,
   otherwise fall through to the next instruction.  Both exits go through
   gen_goto_tb so they can be chained. */
static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
                                   TCGv r2, int16_t address)
{
    int jumpLabel;
    jumpLabel = gen_new_label();
    tcg_gen_brcond_tl(cond, r1, r2, jumpLabel);

    /* not-taken path */
    gen_goto_tb(ctx, 1, ctx->next_pc);

    gen_set_label(jumpLabel);
    /* taken path: displacement is in halfwords */
    gen_goto_tb(ctx, 0, ctx->pc + address * 2);
}
406 static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
407 int r2, int16_t address)
409 TCGv temp = tcg_const_i32(r2);
410 gen_branch_cond(ctx, cond, r1, temp, address);
411 tcg_temp_free(temp);
/* LOOP instruction: decrement a[r1]; branch back by 'offset' unless the
   counter wrapped to -1, in which case fall through */
static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
{
    int l1;
    l1 = gen_new_label();

    tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
    /* counter still >= 0: take the loop branch */
    gen_goto_tb(ctx, 1, ctx->pc + offset);
    gen_set_label(l1);
    /* loop exhausted: continue with the next instruction */
    gen_goto_tb(ctx, 0, ctx->next_pc);
}
/* Emit code for one branch-family instruction and mark the block as ended
   (ctx->bstate = BS_BRANCH).  'offset' is a halfword displacement for the
   J/CALL forms; 'constant' carries the compare value or bit number. */
static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
                               int r2 , int32_t constant , int32_t offset)
{
    TCGv temp;

    switch (opc) {
/* SB-format jumps */
    case OPC1_16_SB_J:
    case OPC1_32_B_J:
        gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
        break;
    case OPC1_16_SB_CALL:
        /* helper saves the return context before the jump */
        gen_helper_1arg(call, ctx->next_pc);
        gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
        break;
    case OPC1_16_SB_JZ:
        gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], 0, offset);
        break;
    case OPC1_16_SB_JNZ:
        gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], 0, offset);
        break;
/* SBC-format jumps */
    case OPC1_16_SBC_JEQ:
        gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant, offset);
        break;
    case OPC1_16_SBC_JNE:
        gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], constant, offset);
        break;
/* SBRN-format jumps: test bit 'constant' of d15 */
    case OPC1_16_SBRN_JZ_T:
        temp = tcg_temp_new();
        tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
        gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
        tcg_temp_free(temp);
        break;
    case OPC1_16_SBRN_JNZ_T:
        temp = tcg_temp_new();
        tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
        gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
        tcg_temp_free(temp);
        break;
/* SBR-format jumps */
    case OPC1_16_SBR_JEQ:
        gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15],
                        offset);
        break;
    case OPC1_16_SBR_JNE:
        gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15],
                        offset);
        break;
    case OPC1_16_SBR_JNZ:
        gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], 0, offset);
        break;
    case OPC1_16_SBR_JNZ_A:
        gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset);
        break;
    case OPC1_16_SBR_JGEZ:
        gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], 0, offset);
        break;
    case OPC1_16_SBR_JGTZ:
        gen_branch_condi(ctx, TCG_COND_GT, cpu_gpr_d[r1], 0, offset);
        break;
    case OPC1_16_SBR_JLEZ:
        gen_branch_condi(ctx, TCG_COND_LE, cpu_gpr_d[r1], 0, offset);
        break;
    case OPC1_16_SBR_JLTZ:
        gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], 0, offset);
        break;
    case OPC1_16_SBR_JZ:
        gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], 0, offset);
        break;
    case OPC1_16_SBR_JZ_A:
        gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset);
        break;
    case OPC1_16_SBR_LOOP:
        gen_loop(ctx, r1, offset * 2 - 32);
        break;
/* SR-format jumps */
    case OPC1_16_SR_JI:
        /* indirect jump: PC = a[r1] with bit 0 forced clear */
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
        tcg_gen_exit_tb(0);
        break;
    case OPC2_16_SR_RET:
        gen_helper_ret(cpu_env);
        tcg_gen_exit_tb(0);
        break;
    default:
        /* NOTE(review): should go through qemu_log/exception handling
           rather than printf */
        printf("Branch Error at %x\n", ctx->pc);
    }
    ctx->bstate = BS_BRANCH;
}
520 * Functions for decoding instructions
/* Decode SRC-format instructions: one register operand plus a 4-bit
   sign-extended constant; d15 is an implicit operand for several opcodes. */
static void decode_src_opc(DisasContext *ctx, int op1)
{
    int r1;
    int32_t const4;
    TCGv temp, temp2;

    r1 = MASK_OP_SRC_S1D(ctx->opcode);
    const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode);

    switch (op1) {
    case OPC1_16_SRC_ADD:
        gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
        break;
    case OPC1_16_SRC_ADD_A15:
        gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[15], const4);
        break;
    case OPC1_16_SRC_ADD_15A:
        gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4);
        break;
    case OPC1_16_SRC_ADD_A:
        /* address add: no PSW flag update */
        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4);
        break;
    case OPC1_16_SRC_CADD:
        gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
                      cpu_gpr_d[15]);
        break;
    case OPC1_16_SRC_CADDN:
        gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
                      cpu_gpr_d[15]);
        break;
    case OPC1_16_SRC_CMOV:
        /* d[r1] = (d15 != 0) ? const4 : d[r1] */
        temp = tcg_const_tl(0);
        temp2 = tcg_const_tl(const4);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           temp2, cpu_gpr_d[r1]);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
    case OPC1_16_SRC_CMOVN:
        /* d[r1] = (d15 == 0) ? const4 : d[r1] */
        temp = tcg_const_tl(0);
        temp2 = tcg_const_tl(const4);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           temp2, cpu_gpr_d[r1]);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
    case OPC1_16_SRC_EQ:
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
                            const4);
        break;
    case OPC1_16_SRC_LT:
        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
                            const4);
        break;
    case OPC1_16_SRC_MOV:
        tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
        break;
    case OPC1_16_SRC_MOV_A:
        /* MOV.A takes the constant zero-extended */
        const4 = MASK_OP_SRC_CONST4(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_a[r1], const4);
        break;
    case OPC1_16_SRC_SH:
        gen_shi(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
        break;
    case OPC1_16_SRC_SHA:
        gen_shaci(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
        break;
    }
}
/* Decode SRR-format instructions: two register operands, with d15 implicit
   for the *_A15 / *_15A / compare variants */
static void decode_srr_opc(DisasContext *ctx, int op1)
{
    int r1, r2;
    TCGv temp;

    r1 = MASK_OP_SRR_S1D(ctx->opcode);
    r2 = MASK_OP_SRR_S2(ctx->opcode);

    switch (op1) {
    case OPC1_16_SRR_ADD:
        gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_ADD_A15:
        gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_ADD_15A:
        gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_ADD_A:
        /* address add: no PSW flag update */
        tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
        break;
    case OPC1_16_SRR_ADDS:
        gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_AND:
        tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_CMOV:
        /* d[r1] = (d15 != 0) ? d[r2] : d[r1] */
        temp = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           cpu_gpr_d[r2], cpu_gpr_d[r1]);
        tcg_temp_free(temp);
        break;
    case OPC1_16_SRR_CMOVN:
        /* d[r1] = (d15 == 0) ? d[r2] : d[r1] */
        temp = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           cpu_gpr_d[r2], cpu_gpr_d[r1]);
        tcg_temp_free(temp);
        break;
    case OPC1_16_SRR_EQ:
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_LT:
        tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_MOV:
        tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_MOV_A:
        tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_MOV_AA:
        tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]);
        break;
    case OPC1_16_SRR_MOV_D:
        tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]);
        break;
    case OPC1_16_SRR_MUL:
        gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_OR:
        tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_SUB:
        gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_SUB_A15B:
        gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_SUB_15AB:
        gen_sub_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_SUBS:
        gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC1_16_SRR_XOR:
        tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    }
}
/* Decode SSR-format stores: store a register to [a[r2]], with optional
   post-increment of the address register by the access size */
static void decode_ssr_opc(DisasContext *ctx, int op1)
{
    int r1, r2;

    r1 = MASK_OP_SSR_S1(ctx->opcode);
    r2 = MASK_OP_SSR_S2(ctx->opcode);

    switch (op1) {
    case OPC1_16_SSR_ST_A:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
        break;
    case OPC1_16_SSR_ST_A_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
        break;
    case OPC1_16_SSR_ST_B:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
        break;
    case OPC1_16_SSR_ST_B_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
        break;
    case OPC1_16_SSR_ST_H:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
        break;
    case OPC1_16_SSR_ST_H_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
        break;
    case OPC1_16_SSR_ST_W:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
        break;
    case OPC1_16_SSR_ST_W_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
        break;
    }
}
/* Decode SC-format instructions: implicit d15/a15/a10(sp) operands with an
   8-bit constant; word-sized loads/stores scale the constant by 4 */
static void decode_sc_opc(DisasContext *ctx, int op1)
{
    int32_t const16;

    const16 = MASK_OP_SC_CONST8(ctx->opcode);

    switch (op1) {
    case OPC1_16_SC_AND:
        tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
        break;
    case OPC1_16_SC_BISR:
        gen_helper_1arg(bisr, const16 & 0xff);
        break;
    case OPC1_16_SC_LD_A:
        gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
        break;
    case OPC1_16_SC_LD_W:
        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
        break;
    case OPC1_16_SC_MOV:
        tcg_gen_movi_tl(cpu_gpr_d[15], const16);
        break;
    case OPC1_16_SC_OR:
        tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
        break;
    case OPC1_16_SC_ST_A:
        gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
        break;
    case OPC1_16_SC_ST_W:
        gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
        break;
    case OPC1_16_SC_SUB_A:
        /* grow the stack: sp -= const */
        tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16);
        break;
    }
}
752 static void decode_slr_opc(DisasContext *ctx, int op1)
754 int r1, r2;
756 r1 = MASK_OP_SLR_D(ctx->opcode);
757 r2 = MASK_OP_SLR_S2(ctx->opcode);
759 switch (op1) {
760 /* SLR-format */
761 case OPC1_16_SLR_LD_A:
762 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
763 break;
764 case OPC1_16_SLR_LD_A_POSTINC:
765 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
766 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
767 break;
768 case OPC1_16_SLR_LD_BU:
769 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
770 break;
771 case OPC1_16_SLR_LD_BU_POSTINC:
772 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
773 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
774 break;
775 case OPC1_16_SLR_LD_H:
776 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
777 break;
778 case OPC1_16_SLR_LD_H_POSTINC:
779 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
780 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
781 break;
782 case OPC1_16_SLR_LD_W:
783 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
784 break;
785 case OPC1_16_SLR_LD_W_POSTINC:
786 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
787 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
788 break;
792 static void decode_sro_opc(DisasContext *ctx, int op1)
794 int r2;
795 int32_t address;
797 r2 = MASK_OP_SRO_S2(ctx->opcode);
798 address = MASK_OP_SRO_OFF4(ctx->opcode);
800 /* SRO-format */
801 switch (op1) {
802 case OPC1_16_SRO_LD_A:
803 gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
804 break;
805 case OPC1_16_SRO_LD_BU:
806 gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
807 break;
808 case OPC1_16_SRO_LD_H:
809 gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_LESW);
810 break;
811 case OPC1_16_SRO_LD_W:
812 gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
813 break;
814 case OPC1_16_SRO_ST_A:
815 gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
816 break;
817 case OPC1_16_SRO_ST_B:
818 gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
819 break;
820 case OPC1_16_SRO_ST_H:
821 gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
822 break;
823 case OPC1_16_SRO_ST_W:
824 gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
825 break;
/* Decode SR-format system instructions, discriminated by op2 */
static void decode_sr_system(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    op2 = MASK_OP_SR_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_16_SR_NOP:
        break;
    case OPC2_16_SR_RET:
        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
        break;
    case OPC2_16_SR_RFE:
        gen_helper_rfe(cpu_env);
        tcg_gen_exit_tb(0);
        ctx->bstate = BS_BRANCH;
        break;
    case OPC2_16_SR_DEBUG:
        /* raise EXCP_DEBUG */
        /* NOTE(review): debug exception not implemented yet */
        break;
    }
}
/* Decode SR-format accumulator instructions: RSUB (negate) and the
   saturation family, all operating on d[r1] in place */
static void decode_sr_accu(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1;
    TCGv temp;

    r1 = MASK_OP_SR_S1D(ctx->opcode);
    op2 = MASK_OP_SR_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_16_SR_RSUB:
        /* overflow only if r1 = -0x80000000; V is computed from the value
           before negation */
        temp = tcg_const_i32(-0x80000000);
        /* calc V bit */
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc SV bit */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* sub */
        tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
        /* calc av: d[r1][31] ^ d[r1][30] */
        tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
        tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
        /* calc sav */
        tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
        tcg_temp_free(temp);
        break;
    case OPC2_16_SR_SAT_B:
        gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80);
        break;
    case OPC2_16_SR_SAT_BU:
        gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xff);
        break;
    case OPC2_16_SR_SAT_H:
        gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7fff, -0x8000);
        break;
    case OPC2_16_SR_SAT_HU:
        gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xffff);
        break;
    }
}
/* Top-level decoder for 16-bit instructions: dispatch on the major opcode
   to the per-format decode_* helpers, or handle the simple formats inline */
static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
{
    int op1;
    int r1, r2;
    int32_t const16;
    int32_t address;
    TCGv temp;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle ADDSC.A opcode only being 6 bit long */
    if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) {
        op1 = OPC1_16_SRRS_ADDSC_A;
    }

    switch (op1) {
    case OPC1_16_SRC_ADD:
    case OPC1_16_SRC_ADD_A15:
    case OPC1_16_SRC_ADD_15A:
    case OPC1_16_SRC_ADD_A:
    case OPC1_16_SRC_CADD:
    case OPC1_16_SRC_CADDN:
    case OPC1_16_SRC_CMOV:
    case OPC1_16_SRC_CMOVN:
    case OPC1_16_SRC_EQ:
    case OPC1_16_SRC_LT:
    case OPC1_16_SRC_MOV:
    case OPC1_16_SRC_MOV_A:
    case OPC1_16_SRC_SH:
    case OPC1_16_SRC_SHA:
        decode_src_opc(ctx, op1);
        break;
/* SRR-format */
    case OPC1_16_SRR_ADD:
    case OPC1_16_SRR_ADD_A15:
    case OPC1_16_SRR_ADD_15A:
    case OPC1_16_SRR_ADD_A:
    case OPC1_16_SRR_ADDS:
    case OPC1_16_SRR_AND:
    case OPC1_16_SRR_CMOV:
    case OPC1_16_SRR_CMOVN:
    case OPC1_16_SRR_EQ:
    case OPC1_16_SRR_LT:
    case OPC1_16_SRR_MOV:
    case OPC1_16_SRR_MOV_A:
    case OPC1_16_SRR_MOV_AA:
    case OPC1_16_SRR_MOV_D:
    case OPC1_16_SRR_MUL:
    case OPC1_16_SRR_OR:
    case OPC1_16_SRR_SUB:
    case OPC1_16_SRR_SUB_A15B:
    case OPC1_16_SRR_SUB_15AB:
    case OPC1_16_SRR_SUBS:
    case OPC1_16_SRR_XOR:
        decode_srr_opc(ctx, op1);
        break;
/* SSR-format */
    case OPC1_16_SSR_ST_A:
    case OPC1_16_SSR_ST_A_POSTINC:
    case OPC1_16_SSR_ST_B:
    case OPC1_16_SSR_ST_B_POSTINC:
    case OPC1_16_SSR_ST_H:
    case OPC1_16_SSR_ST_H_POSTINC:
    case OPC1_16_SSR_ST_W:
    case OPC1_16_SSR_ST_W_POSTINC:
        decode_ssr_opc(ctx, op1);
        break;
/* SRRS-format */
    case OPC1_16_SRRS_ADDSC_A:
        /* a[r1] = a[r2] + (d15 << n) */
        r2 = MASK_OP_SRRS_S2(ctx->opcode);
        r1 = MASK_OP_SRRS_S1D(ctx->opcode);
        const16 = MASK_OP_SRRS_N(ctx->opcode);
        temp = tcg_temp_new();
        tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16);
        tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
/* SLRO-format: load via a15 plus scaled 4-bit offset */
    case OPC1_16_SLRO_LD_A:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
        break;
    case OPC1_16_SLRO_LD_BU:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
        break;
    case OPC1_16_SLRO_LD_H:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
        break;
    case OPC1_16_SLRO_LD_W:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
        break;
/* SB-format */
    case OPC1_16_SB_CALL:
    case OPC1_16_SB_J:
    case OPC1_16_SB_JNZ:
    case OPC1_16_SB_JZ:
        address = MASK_OP_SB_DISP8_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, 0, address);
        break;
/* SBC-format */
    case OPC1_16_SBC_JEQ:
    case OPC1_16_SBC_JNE:
        address = MASK_OP_SBC_DISP4(ctx->opcode);
        const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, const16, address);
        break;
/* SBRN-format */
    case OPC1_16_SBRN_JNZ_T:
    case OPC1_16_SBRN_JZ_T:
        address = MASK_OP_SBRN_DISP4(ctx->opcode);
        const16 = MASK_OP_SBRN_N(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, const16, address);
        break;
/* SBR-format */
    case OPC1_16_SBR_JEQ:
    case OPC1_16_SBR_JGEZ:
    case OPC1_16_SBR_JGTZ:
    case OPC1_16_SBR_JLEZ:
    case OPC1_16_SBR_JLTZ:
    case OPC1_16_SBR_JNE:
    case OPC1_16_SBR_JNZ:
    case OPC1_16_SBR_JNZ_A:
    case OPC1_16_SBR_JZ:
    case OPC1_16_SBR_JZ_A:
    case OPC1_16_SBR_LOOP:
        r1 = MASK_OP_SBR_S2(ctx->opcode);
        address = MASK_OP_SBR_DISP4(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, address);
        break;
/* SC-format */
    case OPC1_16_SC_AND:
    case OPC1_16_SC_BISR:
    case OPC1_16_SC_LD_A:
    case OPC1_16_SC_LD_W:
    case OPC1_16_SC_MOV:
    case OPC1_16_SC_OR:
    case OPC1_16_SC_ST_A:
    case OPC1_16_SC_ST_W:
    case OPC1_16_SC_SUB_A:
        decode_sc_opc(ctx, op1);
        break;
/* SLR-format */
    case OPC1_16_SLR_LD_A:
    case OPC1_16_SLR_LD_A_POSTINC:
    case OPC1_16_SLR_LD_BU:
    case OPC1_16_SLR_LD_BU_POSTINC:
    case OPC1_16_SLR_LD_H:
    case OPC1_16_SLR_LD_H_POSTINC:
    case OPC1_16_SLR_LD_W:
    case OPC1_16_SLR_LD_W_POSTINC:
        decode_slr_opc(ctx, op1);
        break;
/* SRO-format */
    case OPC1_16_SRO_LD_A:
    case OPC1_16_SRO_LD_BU:
    case OPC1_16_SRO_LD_H:
    case OPC1_16_SRO_LD_W:
    case OPC1_16_SRO_ST_A:
    case OPC1_16_SRO_ST_B:
    case OPC1_16_SRO_ST_H:
    case OPC1_16_SRO_ST_W:
        decode_sro_opc(ctx, op1);
        break;
/* SSRO-format: store via a15 plus scaled 4-bit offset */
    case OPC1_16_SSRO_ST_A:
        r1 = MASK_OP_SSRO_S1(ctx->opcode);
        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
        gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
        break;
    case OPC1_16_SSRO_ST_B:
        r1 = MASK_OP_SSRO_S1(ctx->opcode);
        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
        break;
    case OPC1_16_SSRO_ST_H:
        r1 = MASK_OP_SSRO_S1(ctx->opcode);
        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
        break;
    case OPC1_16_SSRO_ST_W:
        r1 = MASK_OP_SSRO_S1(ctx->opcode);
        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
        break;
/* SR-format */
    case OPCM_16_SR_SYSTEM:
        decode_sr_system(env, ctx);
        break;
    case OPCM_16_SR_ACCU:
        decode_sr_accu(env, ctx);
        break;
    case OPC1_16_SR_JI:
        r1 = MASK_OP_SR_S1D(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, 0);
        break;
    case OPC1_16_SR_NOT:
        r1 = MASK_OP_SR_S1D(ctx->opcode);
        tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
        break;
    }
}
1102 static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
/* Decode one instruction: bit 0 of the opcode word distinguishes 16-bit
   (clear) from 32-bit (set) encodings; next_pc is advanced accordingly.
   'is_branch' is currently unused by the callers seen here. */
static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
{
    /* 16-Bit Instruction */
    if ((ctx->opcode & 0x1) == 0) {
        ctx->next_pc = ctx->pc + 2;
        decode_16Bit_opc(env, ctx);
    /* 32-Bit Instruction */
    } else {
        ctx->next_pc = ctx->pc + 4;
        decode_32Bit_opc(env, ctx);
    }
}
/* Main translation loop: decode instructions from tb->pc until a branch
   ends the block (bstate != BS_NONE), the opcode buffer fills, or
   single-stepping forces an exit after each insn. */
static inline void
gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
                               int search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUTriCoreState *env = &cpu->env;
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;
    uint16_t *gen_opc_end;

    if (search_pc) {
        qemu_log("search pc %d\n", search_pc);
    }

    num_insns = 0;
    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.saved_pc = -1;
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.bstate = BS_NONE;
    ctx.mem_idx = cpu_mmu_index(env);

    tcg_clear_temp_count();
    gen_tb_start();
    while (ctx.bstate == BS_NONE) {
        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx, 0);

        num_insns++;

        /* stop before the opcode buffer overflows */
        if (tcg_ctx.gen_opc_ptr >= gen_opc_end) {
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        /* one insn per TB when single-stepping */
        if (singlestep) {
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        ctx.pc = ctx.next_pc;
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        printf("done_generating search pc\n");
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

    /* report leaked TCG temporaries from the decoders */
    if (tcg_check_temp_count()) {
        printf("LEAK at %08x\n", env->PC);
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
/* Public entry point: translate a TB (no PC search) */
void
gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
}
/* Public entry point: translate a TB in PC-search mode */
void
gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
}
/* Restore the architectural PC from the opcode metadata after an exception
   inside a translated block */
void
restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos)
{
    env->PC = tcg_ctx.gen_opc_pc[pc_pos];
}
1205 * Initialization
/* Architectural reset of the CPU state visible here */
void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
    env->PSW = 0xb80;
}
/* Create the TCG globals backing the core special function registers */
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
                                  offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, ICR), "ICR");
}
1227 void tricore_tcg_init(void)
1229 int i;
1230 static int inited;
1231 if (inited) {
1232 return;
1234 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1235 /* reg init */
1236 for (i = 0 ; i < 16 ; i++) {
1237 cpu_gpr_a[i] = tcg_global_mem_new(TCG_AREG0,
1238 offsetof(CPUTriCoreState, gpr_a[i]),
1239 regnames_a[i]);
1241 for (i = 0 ; i < 16 ; i++) {
1242 cpu_gpr_d[i] = tcg_global_mem_new(TCG_AREG0,
1243 offsetof(CPUTriCoreState, gpr_d[i]),
1244 regnames_d[i]);
1246 tricore_tcg_init_csfr();
1247 /* init PSW flag cache */
1248 cpu_PSW_C = tcg_global_mem_new(TCG_AREG0,
1249 offsetof(CPUTriCoreState, PSW_USB_C),
1250 "PSW_C");
1251 cpu_PSW_V = tcg_global_mem_new(TCG_AREG0,
1252 offsetof(CPUTriCoreState, PSW_USB_V),
1253 "PSW_V");
1254 cpu_PSW_SV = tcg_global_mem_new(TCG_AREG0,
1255 offsetof(CPUTriCoreState, PSW_USB_SV),
1256 "PSW_SV");
1257 cpu_PSW_AV = tcg_global_mem_new(TCG_AREG0,
1258 offsetof(CPUTriCoreState, PSW_USB_AV),
1259 "PSW_AV");
1260 cpu_PSW_SAV = tcg_global_mem_new(TCG_AREG0,
1261 offsetof(CPUTriCoreState, PSW_USB_SAV),
1262 "PSW_SAV");