2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
39 static TCGv cpu_gpr_a
[16];
40 static TCGv cpu_gpr_d
[16];
42 static TCGv cpu_PSW_C
;
43 static TCGv cpu_PSW_V
;
44 static TCGv cpu_PSW_SV
;
45 static TCGv cpu_PSW_AV
;
46 static TCGv cpu_PSW_SAV
;
48 static TCGv_ptr cpu_env
;
50 #include "exec/gen-icount.h"
52 static const char *regnames_a
[] = {
53 "a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
54 "a6" , "a7" , "a8" , "a9" , "sp" , "a11" ,
55 "a12" , "a13" , "a14" , "a15",
58 static const char *regnames_d
[] = {
59 "d0" , "d1" , "d2" , "d3" , "d4" , "d5" ,
60 "d6" , "d7" , "d8" , "d9" , "d10" , "d11" ,
61 "d12" , "d13" , "d14" , "d15",
64 typedef struct DisasContext
{
65 struct TranslationBlock
*tb
;
66 target_ulong pc
, saved_pc
, next_pc
;
68 int singlestep_enabled
;
69 /* Routine used to access memory */
71 uint32_t hflags
, saved_hflags
;
83 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
84 fprintf_function cpu_fprintf
, int flags
)
86 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
87 CPUTriCoreState
*env
= &cpu
->env
;
90 cpu_fprintf(f
, "PC=%08x\n", env
->PC
);
91 for (i
= 0; i
< 16; ++i
) {
93 cpu_fprintf(f
, "GPR A%02d:", i
);
95 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_a
[i
], env
->gpr_a
[i
]);
97 for (i
= 0; i
< 16; ++i
) {
99 cpu_fprintf(f
, "GPR D%02d:", i
);
101 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_d
[i
], env
->gpr_d
[i
]);
107 * Functions to generate micro-ops
110 /* Makros for generating helpers */
112 #define gen_helper_1arg(name, arg) do { \
113 TCGv_i32 helper_tmp = tcg_const_i32(arg); \
114 gen_helper_##name(cpu_env, helper_tmp); \
115 tcg_temp_free_i32(helper_tmp); \
118 #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
119 #define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
120 ((offset & 0x0fffff) << 1))
122 /* Functions for load/save to/from memory */
124 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
125 int16_t con
, TCGMemOp mop
)
127 TCGv temp
= tcg_temp_new();
128 tcg_gen_addi_tl(temp
, r2
, con
);
129 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
133 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
134 int16_t con
, TCGMemOp mop
)
136 TCGv temp
= tcg_temp_new();
137 tcg_gen_addi_tl(temp
, r2
, con
);
138 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
142 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
144 TCGv_i64 temp
= tcg_temp_new_i64();
146 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
147 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
149 tcg_temp_free_i64(temp
);
152 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
155 TCGv temp
= tcg_temp_new();
156 tcg_gen_addi_tl(temp
, base
, con
);
157 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
161 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
163 TCGv_i64 temp
= tcg_temp_new_i64();
165 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
166 /* write back to two 32 bit regs */
167 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
169 tcg_temp_free_i64(temp
);
172 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
175 TCGv temp
= tcg_temp_new();
176 tcg_gen_addi_tl(temp
, base
, con
);
177 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
181 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
184 TCGv temp
= tcg_temp_new();
185 tcg_gen_addi_tl(temp
, r2
, off
);
186 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
187 tcg_gen_mov_tl(r2
, temp
);
191 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
194 TCGv temp
= tcg_temp_new();
195 tcg_gen_addi_tl(temp
, r2
, off
);
196 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
197 tcg_gen_mov_tl(r2
, temp
);
201 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
202 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
204 TCGv temp
= tcg_temp_new();
205 TCGv temp2
= tcg_temp_new();
207 /* temp = (M(EA, word) */
208 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
209 /* temp = temp & ~E[a][63:32]) */
210 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
211 /* temp2 = (E[a][31:0] & E[a][63:32]); */
212 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
213 /* temp = temp | temp2; */
214 tcg_gen_or_tl(temp
, temp
, temp2
);
215 /* M(EA, word) = temp; */
216 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
219 tcg_temp_free(temp2
);
222 /* tmp = M(EA, word);
225 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
227 TCGv temp
= tcg_temp_new();
229 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
230 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
231 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
236 /* Functions for arithmetic instructions */
238 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
240 TCGv t0
= tcg_temp_new_i32();
241 TCGv result
= tcg_temp_new_i32();
242 /* Addition and set V/SV bits */
243 tcg_gen_add_tl(result
, r1
, r2
);
245 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
246 tcg_gen_xor_tl(t0
, r1
, r2
);
247 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
249 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
250 /* Calc AV/SAV bits */
251 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
252 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
254 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
255 /* write back result */
256 tcg_gen_mov_tl(ret
, result
);
258 tcg_temp_free(result
);
262 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
264 TCGv temp
= tcg_const_i32(r2
);
265 gen_add_d(ret
, r1
, temp
);
269 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
272 TCGv temp
= tcg_temp_new();
273 TCGv temp2
= tcg_temp_new();
274 TCGv result
= tcg_temp_new();
275 TCGv mask
= tcg_temp_new();
276 TCGv t0
= tcg_const_i32(0);
278 /* create mask for sticky bits */
279 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
280 tcg_gen_shli_tl(mask
, mask
, 31);
282 tcg_gen_add_tl(result
, r1
, r2
);
284 tcg_gen_xor_tl(temp
, result
, r1
);
285 tcg_gen_xor_tl(temp2
, r1
, r2
);
286 tcg_gen_andc_tl(temp
, temp
, temp2
);
287 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
289 tcg_gen_and_tl(temp
, temp
, mask
);
290 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
292 tcg_gen_add_tl(temp
, result
, result
);
293 tcg_gen_xor_tl(temp
, temp
, result
);
294 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
296 tcg_gen_and_tl(temp
, temp
, mask
);
297 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
298 /* write back result */
299 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r3
);
303 tcg_temp_free(temp2
);
304 tcg_temp_free(result
);
308 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
311 TCGv temp
= tcg_const_i32(r2
);
312 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
316 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
318 TCGv temp
= tcg_temp_new_i32();
319 TCGv result
= tcg_temp_new_i32();
321 tcg_gen_sub_tl(result
, r1
, r2
);
323 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
324 tcg_gen_xor_tl(temp
, r1
, r2
);
325 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
327 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
329 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
330 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
332 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
333 /* write back result */
334 tcg_gen_mov_tl(ret
, result
);
337 tcg_temp_free(result
);
340 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
342 TCGv high
= tcg_temp_new();
343 TCGv low
= tcg_temp_new();
345 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
346 tcg_gen_mov_tl(ret
, low
);
348 tcg_gen_sari_tl(low
, low
, 31);
349 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
350 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
352 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
354 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
355 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
357 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
363 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
365 TCGv sat_neg
= tcg_const_i32(low
);
366 TCGv temp
= tcg_const_i32(up
);
368 /* sat_neg = (arg < low ) ? low : arg; */
369 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
371 /* ret = (sat_neg > up ) ? up : sat_neg; */
372 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
374 tcg_temp_free(sat_neg
);
378 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
380 TCGv temp
= tcg_const_i32(up
);
381 /* sat_neg = (arg > up ) ? up : arg; */
382 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
386 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
388 if (shift_count
== -32) {
389 tcg_gen_movi_tl(ret
, 0);
390 } else if (shift_count
>= 0) {
391 tcg_gen_shli_tl(ret
, r1
, shift_count
);
393 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
397 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
399 uint32_t msk
, msk_start
;
400 TCGv temp
= tcg_temp_new();
401 TCGv temp2
= tcg_temp_new();
402 TCGv t_0
= tcg_const_i32(0);
404 if (shift_count
== 0) {
405 /* Clear PSW.C and PSW.V */
406 tcg_gen_movi_tl(cpu_PSW_C
, 0);
407 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
408 tcg_gen_mov_tl(ret
, r1
);
409 } else if (shift_count
== -32) {
411 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
412 /* fill ret completly with sign bit */
413 tcg_gen_sari_tl(ret
, r1
, 31);
415 tcg_gen_movi_tl(cpu_PSW_V
, 0);
416 } else if (shift_count
> 0) {
417 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
418 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
421 msk_start
= 32 - shift_count
;
422 msk
= ((1 << shift_count
) - 1) << msk_start
;
423 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
425 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
426 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
427 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
428 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
430 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
432 tcg_gen_shli_tl(ret
, r1
, shift_count
);
434 tcg_temp_free(t_max
);
435 tcg_temp_free(t_min
);
438 tcg_gen_movi_tl(cpu_PSW_V
, 0);
440 msk
= (1 << -shift_count
) - 1;
441 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
443 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
445 /* calc av overflow bit */
446 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
447 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
448 /* calc sav overflow bit */
449 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
452 tcg_temp_free(temp2
);
456 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
458 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
461 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
463 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
466 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
468 void(*op1
)(TCGv
, TCGv
, TCGv
),
469 void(*op2
)(TCGv
, TCGv
, TCGv
))
473 temp1
= tcg_temp_new();
474 temp2
= tcg_temp_new();
476 tcg_gen_shri_tl(temp2
, r2
, pos2
);
477 tcg_gen_shri_tl(temp1
, r1
, pos1
);
479 (*op1
)(temp1
, temp1
, temp2
);
480 (*op2
)(temp1
, ret
, temp1
);
482 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
484 tcg_temp_free(temp1
);
485 tcg_temp_free(temp2
);
488 /* ret = r1[pos1] op1 r2[pos2]; */
489 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
491 void(*op1
)(TCGv
, TCGv
, TCGv
))
495 temp1
= tcg_temp_new();
496 temp2
= tcg_temp_new();
498 tcg_gen_shri_tl(temp2
, r2
, pos2
);
499 tcg_gen_shri_tl(temp1
, r1
, pos1
);
501 (*op1
)(ret
, temp1
, temp2
);
503 tcg_gen_andi_tl(ret
, ret
, 0x1);
505 tcg_temp_free(temp1
);
506 tcg_temp_free(temp2
);
509 /* helpers for generating program flow micro-ops */
511 static inline void gen_save_pc(target_ulong pc
)
513 tcg_gen_movi_tl(cpu_PC
, pc
);
516 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
518 TranslationBlock
*tb
;
520 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
521 likely(!ctx
->singlestep_enabled
)) {
524 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
527 if (ctx
->singlestep_enabled
) {
528 /* raise exception debug */
534 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
535 TCGv r2
, int16_t address
)
538 jumpLabel
= gen_new_label();
539 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
541 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
543 gen_set_label(jumpLabel
);
544 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
547 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
548 int r2
, int16_t address
)
550 TCGv temp
= tcg_const_i32(r2
);
551 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
555 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
558 l1
= gen_new_label();
560 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
561 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
562 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
564 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
567 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
568 int r2
, int32_t constant
, int32_t offset
)
574 /* SB-format jumps */
577 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
580 case OPC1_16_SB_CALL
:
581 gen_helper_1arg(call
, ctx
->next_pc
);
582 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
585 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
588 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
590 /* SBC-format jumps */
591 case OPC1_16_SBC_JEQ
:
592 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
594 case OPC1_16_SBC_JNE
:
595 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
597 /* SBRN-format jumps */
598 case OPC1_16_SBRN_JZ_T
:
599 temp
= tcg_temp_new();
600 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
601 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
604 case OPC1_16_SBRN_JNZ_T
:
605 temp
= tcg_temp_new();
606 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
607 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
610 /* SBR-format jumps */
611 case OPC1_16_SBR_JEQ
:
612 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
615 case OPC1_16_SBR_JNE
:
616 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
619 case OPC1_16_SBR_JNZ
:
620 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
622 case OPC1_16_SBR_JNZ_A
:
623 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
625 case OPC1_16_SBR_JGEZ
:
626 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
628 case OPC1_16_SBR_JGTZ
:
629 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
631 case OPC1_16_SBR_JLEZ
:
632 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
634 case OPC1_16_SBR_JLTZ
:
635 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
638 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
640 case OPC1_16_SBR_JZ_A
:
641 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
643 case OPC1_16_SBR_LOOP
:
644 gen_loop(ctx
, r1
, offset
* 2 - 32);
646 /* SR-format jumps */
648 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
652 gen_helper_ret(cpu_env
);
656 case OPC1_32_B_CALLA
:
657 gen_helper_1arg(call
, ctx
->next_pc
);
658 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
661 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
663 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
666 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
667 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
670 case OPCM_32_BRC_EQ_NEQ
:
671 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
672 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
674 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
678 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
679 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
681 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
682 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
686 case OPCM_32_BRC_JLT
:
687 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
688 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
690 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
691 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
695 case OPCM_32_BRC_JNE
:
696 temp
= tcg_temp_new();
697 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
698 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
699 /* subi is unconditional */
700 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
701 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
703 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
704 /* addi is unconditional */
705 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
706 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
711 case OPCM_32_BRN_JTT
:
712 n
= MASK_OP_BRN_N(ctx
->opcode
);
714 temp
= tcg_temp_new();
715 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
717 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
718 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
720 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
725 printf("Branch Error at %x\n", ctx
->pc
);
727 ctx
->bstate
= BS_BRANCH
;
732 * Functions for decoding instructions
735 static void decode_src_opc(DisasContext
*ctx
, int op1
)
741 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
742 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
745 case OPC1_16_SRC_ADD
:
746 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
748 case OPC1_16_SRC_ADD_A15
:
749 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
751 case OPC1_16_SRC_ADD_15A
:
752 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
754 case OPC1_16_SRC_ADD_A
:
755 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
757 case OPC1_16_SRC_CADD
:
758 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
761 case OPC1_16_SRC_CADDN
:
762 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
765 case OPC1_16_SRC_CMOV
:
766 temp
= tcg_const_tl(0);
767 temp2
= tcg_const_tl(const4
);
768 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
769 temp2
, cpu_gpr_d
[r1
]);
771 tcg_temp_free(temp2
);
773 case OPC1_16_SRC_CMOVN
:
774 temp
= tcg_const_tl(0);
775 temp2
= tcg_const_tl(const4
);
776 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
777 temp2
, cpu_gpr_d
[r1
]);
779 tcg_temp_free(temp2
);
782 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
786 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
789 case OPC1_16_SRC_MOV
:
790 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
792 case OPC1_16_SRC_MOV_A
:
793 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
794 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
797 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
799 case OPC1_16_SRC_SHA
:
800 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
805 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
810 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
811 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
814 case OPC1_16_SRR_ADD
:
815 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
817 case OPC1_16_SRR_ADD_A15
:
818 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
820 case OPC1_16_SRR_ADD_15A
:
821 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
823 case OPC1_16_SRR_ADD_A
:
824 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
826 case OPC1_16_SRR_ADDS
:
827 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
829 case OPC1_16_SRR_AND
:
830 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
832 case OPC1_16_SRR_CMOV
:
833 temp
= tcg_const_tl(0);
834 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
835 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
838 case OPC1_16_SRR_CMOVN
:
839 temp
= tcg_const_tl(0);
840 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
841 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
845 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
849 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
852 case OPC1_16_SRR_MOV
:
853 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
855 case OPC1_16_SRR_MOV_A
:
856 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
858 case OPC1_16_SRR_MOV_AA
:
859 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
861 case OPC1_16_SRR_MOV_D
:
862 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
864 case OPC1_16_SRR_MUL
:
865 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
868 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
870 case OPC1_16_SRR_SUB
:
871 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
873 case OPC1_16_SRR_SUB_A15B
:
874 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
876 case OPC1_16_SRR_SUB_15AB
:
877 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
879 case OPC1_16_SRR_SUBS
:
880 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
882 case OPC1_16_SRR_XOR
:
883 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
888 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
892 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
893 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
896 case OPC1_16_SSR_ST_A
:
897 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
899 case OPC1_16_SSR_ST_A_POSTINC
:
900 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
901 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
903 case OPC1_16_SSR_ST_B
:
904 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
906 case OPC1_16_SSR_ST_B_POSTINC
:
907 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
908 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
910 case OPC1_16_SSR_ST_H
:
911 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
913 case OPC1_16_SSR_ST_H_POSTINC
:
914 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
915 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
917 case OPC1_16_SSR_ST_W
:
918 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
920 case OPC1_16_SSR_ST_W_POSTINC
:
921 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
922 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
927 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
931 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
935 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
937 case OPC1_16_SC_BISR
:
938 gen_helper_1arg(bisr
, const16
& 0xff);
940 case OPC1_16_SC_LD_A
:
941 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
943 case OPC1_16_SC_LD_W
:
944 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
947 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
950 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
952 case OPC1_16_SC_ST_A
:
953 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
955 case OPC1_16_SC_ST_W
:
956 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
958 case OPC1_16_SC_SUB_A
:
959 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
964 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
968 r1
= MASK_OP_SLR_D(ctx
->opcode
);
969 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
973 case OPC1_16_SLR_LD_A
:
974 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
976 case OPC1_16_SLR_LD_A_POSTINC
:
977 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
978 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
980 case OPC1_16_SLR_LD_BU
:
981 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
983 case OPC1_16_SLR_LD_BU_POSTINC
:
984 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
985 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
987 case OPC1_16_SLR_LD_H
:
988 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
990 case OPC1_16_SLR_LD_H_POSTINC
:
991 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
992 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
994 case OPC1_16_SLR_LD_W
:
995 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
997 case OPC1_16_SLR_LD_W_POSTINC
:
998 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
999 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
1004 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
1009 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
1010 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
1014 case OPC1_16_SRO_LD_A
:
1015 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
1017 case OPC1_16_SRO_LD_BU
:
1018 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
1020 case OPC1_16_SRO_LD_H
:
1021 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
1023 case OPC1_16_SRO_LD_W
:
1024 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
1026 case OPC1_16_SRO_ST_A
:
1027 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
1029 case OPC1_16_SRO_ST_B
:
1030 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
1032 case OPC1_16_SRO_ST_H
:
1033 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
1035 case OPC1_16_SRO_ST_W
:
1036 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
1041 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
1044 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
1047 case OPC2_16_SR_NOP
:
1049 case OPC2_16_SR_RET
:
1050 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
1052 case OPC2_16_SR_RFE
:
1053 gen_helper_rfe(cpu_env
);
1055 ctx
->bstate
= BS_BRANCH
;
1057 case OPC2_16_SR_DEBUG
:
1058 /* raise EXCP_DEBUG */
1063 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
1069 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
1070 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
1073 case OPC2_16_SR_RSUB
:
1074 /* overflow only if r1 = -0x80000000 */
1075 temp
= tcg_const_i32(-0x80000000);
1077 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
1078 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1080 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1082 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
1084 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
1085 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
1087 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1088 tcg_temp_free(temp
);
1090 case OPC2_16_SR_SAT_B
:
1091 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
1093 case OPC2_16_SR_SAT_BU
:
1094 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
1096 case OPC2_16_SR_SAT_H
:
1097 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
1099 case OPC2_16_SR_SAT_HU
:
1100 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
1105 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
1113 op1
= MASK_OP_MAJOR(ctx
->opcode
);
1115 /* handle ADDSC.A opcode only being 6 bit long */
1116 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
1117 op1
= OPC1_16_SRRS_ADDSC_A
;
1121 case OPC1_16_SRC_ADD
:
1122 case OPC1_16_SRC_ADD_A15
:
1123 case OPC1_16_SRC_ADD_15A
:
1124 case OPC1_16_SRC_ADD_A
:
1125 case OPC1_16_SRC_CADD
:
1126 case OPC1_16_SRC_CADDN
:
1127 case OPC1_16_SRC_CMOV
:
1128 case OPC1_16_SRC_CMOVN
:
1129 case OPC1_16_SRC_EQ
:
1130 case OPC1_16_SRC_LT
:
1131 case OPC1_16_SRC_MOV
:
1132 case OPC1_16_SRC_MOV_A
:
1133 case OPC1_16_SRC_SH
:
1134 case OPC1_16_SRC_SHA
:
1135 decode_src_opc(ctx
, op1
);
1138 case OPC1_16_SRR_ADD
:
1139 case OPC1_16_SRR_ADD_A15
:
1140 case OPC1_16_SRR_ADD_15A
:
1141 case OPC1_16_SRR_ADD_A
:
1142 case OPC1_16_SRR_ADDS
:
1143 case OPC1_16_SRR_AND
:
1144 case OPC1_16_SRR_CMOV
:
1145 case OPC1_16_SRR_CMOVN
:
1146 case OPC1_16_SRR_EQ
:
1147 case OPC1_16_SRR_LT
:
1148 case OPC1_16_SRR_MOV
:
1149 case OPC1_16_SRR_MOV_A
:
1150 case OPC1_16_SRR_MOV_AA
:
1151 case OPC1_16_SRR_MOV_D
:
1152 case OPC1_16_SRR_MUL
:
1153 case OPC1_16_SRR_OR
:
1154 case OPC1_16_SRR_SUB
:
1155 case OPC1_16_SRR_SUB_A15B
:
1156 case OPC1_16_SRR_SUB_15AB
:
1157 case OPC1_16_SRR_SUBS
:
1158 case OPC1_16_SRR_XOR
:
1159 decode_srr_opc(ctx
, op1
);
1162 case OPC1_16_SSR_ST_A
:
1163 case OPC1_16_SSR_ST_A_POSTINC
:
1164 case OPC1_16_SSR_ST_B
:
1165 case OPC1_16_SSR_ST_B_POSTINC
:
1166 case OPC1_16_SSR_ST_H
:
1167 case OPC1_16_SSR_ST_H_POSTINC
:
1168 case OPC1_16_SSR_ST_W
:
1169 case OPC1_16_SSR_ST_W_POSTINC
:
1170 decode_ssr_opc(ctx
, op1
);
1173 case OPC1_16_SRRS_ADDSC_A
:
1174 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
1175 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
1176 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
1177 temp
= tcg_temp_new();
1178 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
1179 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
1180 tcg_temp_free(temp
);
1183 case OPC1_16_SLRO_LD_A
:
1184 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
1185 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
1186 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
1188 case OPC1_16_SLRO_LD_BU
:
1189 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
1190 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
1191 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
1193 case OPC1_16_SLRO_LD_H
:
1194 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
1195 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
1196 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
1198 case OPC1_16_SLRO_LD_W
:
1199 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
1200 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
1201 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
1204 case OPC1_16_SB_CALL
:
1206 case OPC1_16_SB_JNZ
:
1208 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
1209 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
1212 case OPC1_16_SBC_JEQ
:
1213 case OPC1_16_SBC_JNE
:
1214 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
1215 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
1216 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
1219 case OPC1_16_SBRN_JNZ_T
:
1220 case OPC1_16_SBRN_JZ_T
:
1221 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
1222 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
1223 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
1226 case OPC1_16_SBR_JEQ
:
1227 case OPC1_16_SBR_JGEZ
:
1228 case OPC1_16_SBR_JGTZ
:
1229 case OPC1_16_SBR_JLEZ
:
1230 case OPC1_16_SBR_JLTZ
:
1231 case OPC1_16_SBR_JNE
:
1232 case OPC1_16_SBR_JNZ
:
1233 case OPC1_16_SBR_JNZ_A
:
1234 case OPC1_16_SBR_JZ
:
1235 case OPC1_16_SBR_JZ_A
:
1236 case OPC1_16_SBR_LOOP
:
1237 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
1238 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
1239 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
1242 case OPC1_16_SC_AND
:
1243 case OPC1_16_SC_BISR
:
1244 case OPC1_16_SC_LD_A
:
1245 case OPC1_16_SC_LD_W
:
1246 case OPC1_16_SC_MOV
:
1248 case OPC1_16_SC_ST_A
:
1249 case OPC1_16_SC_ST_W
:
1250 case OPC1_16_SC_SUB_A
:
1251 decode_sc_opc(ctx
, op1
);
1254 case OPC1_16_SLR_LD_A
:
1255 case OPC1_16_SLR_LD_A_POSTINC
:
1256 case OPC1_16_SLR_LD_BU
:
1257 case OPC1_16_SLR_LD_BU_POSTINC
:
1258 case OPC1_16_SLR_LD_H
:
1259 case OPC1_16_SLR_LD_H_POSTINC
:
1260 case OPC1_16_SLR_LD_W
:
1261 case OPC1_16_SLR_LD_W_POSTINC
:
1262 decode_slr_opc(ctx
, op1
);
1265 case OPC1_16_SRO_LD_A
:
1266 case OPC1_16_SRO_LD_BU
:
1267 case OPC1_16_SRO_LD_H
:
1268 case OPC1_16_SRO_LD_W
:
1269 case OPC1_16_SRO_ST_A
:
1270 case OPC1_16_SRO_ST_B
:
1271 case OPC1_16_SRO_ST_H
:
1272 case OPC1_16_SRO_ST_W
:
1273 decode_sro_opc(ctx
, op1
);
1276 case OPC1_16_SSRO_ST_A
:
1277 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1278 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1279 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
1281 case OPC1_16_SSRO_ST_B
:
1282 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1283 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1284 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
1286 case OPC1_16_SSRO_ST_H
:
1287 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1288 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1289 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
1291 case OPC1_16_SSRO_ST_W
:
1292 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1293 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1294 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
1297 case OPCM_16_SR_SYSTEM
:
1298 decode_sr_system(env
, ctx
);
1300 case OPCM_16_SR_ACCU
:
1301 decode_sr_accu(env
, ctx
);
1304 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
1305 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
1307 case OPC1_16_SR_NOT
:
1308 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
1309 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
1315 * 32 bit instructions
1319 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
1326 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1327 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1328 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1330 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1333 case OPC2_32_ABS_LD_A
:
1334 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
1336 case OPC2_32_ABS_LD_D
:
1337 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
1339 case OPC2_32_ABS_LD_DA
:
1340 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
1342 case OPC2_32_ABS_LD_W
:
1343 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
1347 tcg_temp_free(temp
);
1350 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
1357 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1358 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1359 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1361 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1364 case OPC2_32_ABS_LD_B
:
1365 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
1367 case OPC2_32_ABS_LD_BU
:
1368 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
1370 case OPC2_32_ABS_LD_H
:
1371 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
1373 case OPC2_32_ABS_LD_HU
:
1374 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
1378 tcg_temp_free(temp
);
1381 static void decode_abs_ldst_swap(CPUTriCoreState
*env
, DisasContext
*ctx
)
1388 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1389 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1390 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1392 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1395 case OPC2_32_ABS_LDMST
:
1396 gen_ldmst(ctx
, r1
, temp
);
1398 case OPC2_32_ABS_SWAP_W
:
1399 gen_swap(ctx
, r1
, temp
);
1403 tcg_temp_free(temp
);
1406 static void decode_abs_ldst_context(CPUTriCoreState
*env
, DisasContext
*ctx
)
1411 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1412 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1415 case OPC2_32_ABS_LDLCX
:
1416 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
1418 case OPC2_32_ABS_LDUCX
:
1419 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
1421 case OPC2_32_ABS_STLCX
:
1422 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
1424 case OPC2_32_ABS_STUCX
:
1425 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
1430 static void decode_abs_store(CPUTriCoreState
*env
, DisasContext
*ctx
)
1437 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1438 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1439 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1441 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1444 case OPC2_32_ABS_ST_A
:
1445 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
1447 case OPC2_32_ABS_ST_D
:
1448 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
1450 case OPC2_32_ABS_ST_DA
:
1451 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
1453 case OPC2_32_ABS_ST_W
:
1454 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
1458 tcg_temp_free(temp
);
1461 static void decode_abs_storeb_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
1468 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1469 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1470 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1472 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1475 case OPC2_32_ABS_ST_B
:
1476 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
1478 case OPC2_32_ABS_ST_H
:
1479 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
1482 tcg_temp_free(temp
);
1487 static void decode_bit_andacc(CPUTriCoreState
*env
, DisasContext
*ctx
)
1493 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1494 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1495 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1496 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1497 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1498 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1502 case OPC2_32_BIT_AND_AND_T
:
1503 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1504 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
1506 case OPC2_32_BIT_AND_ANDN_T
:
1507 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1508 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
1510 case OPC2_32_BIT_AND_NOR_T
:
1511 if (TCG_TARGET_HAS_andc_i32
) {
1512 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1513 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
1515 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1516 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
1519 case OPC2_32_BIT_AND_OR_T
:
1520 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1521 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
1526 static void decode_bit_logical_t(CPUTriCoreState
*env
, DisasContext
*ctx
)
1531 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1532 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1533 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1534 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1535 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1536 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1539 case OPC2_32_BIT_AND_T
:
1540 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1541 pos1
, pos2
, &tcg_gen_and_tl
);
1543 case OPC2_32_BIT_ANDN_T
:
1544 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1545 pos1
, pos2
, &tcg_gen_andc_tl
);
1547 case OPC2_32_BIT_NOR_T
:
1548 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1549 pos1
, pos2
, &tcg_gen_nor_tl
);
1551 case OPC2_32_BIT_OR_T
:
1552 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1553 pos1
, pos2
, &tcg_gen_or_tl
);
1558 static void decode_bit_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
1564 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1565 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1566 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1567 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1568 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1569 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1571 temp
= tcg_temp_new();
1573 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
1574 if (op2
== OPC2_32_BIT_INSN_T
) {
1575 tcg_gen_not_tl(temp
, temp
);
1577 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
1578 tcg_temp_free(temp
);
1581 static void decode_bit_logical_t2(CPUTriCoreState
*env
, DisasContext
*ctx
)
1588 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1589 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1590 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1591 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1592 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1593 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1596 case OPC2_32_BIT_NAND_T
:
1597 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1598 pos1
, pos2
, &tcg_gen_nand_tl
);
1600 case OPC2_32_BIT_ORN_T
:
1601 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1602 pos1
, pos2
, &tcg_gen_orc_tl
);
1604 case OPC2_32_BIT_XNOR_T
:
1605 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1606 pos1
, pos2
, &tcg_gen_eqv_tl
);
1608 case OPC2_32_BIT_XOR_T
:
1609 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1610 pos1
, pos2
, &tcg_gen_xor_tl
);
1615 static void decode_bit_orand(CPUTriCoreState
*env
, DisasContext
*ctx
)
1622 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1623 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1624 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1625 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1626 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1627 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1630 case OPC2_32_BIT_OR_AND_T
:
1631 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1632 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
1634 case OPC2_32_BIT_OR_ANDN_T
:
1635 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1636 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
1638 case OPC2_32_BIT_OR_NOR_T
:
1639 if (TCG_TARGET_HAS_orc_i32
) {
1640 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1641 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
1643 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1644 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
1647 case OPC2_32_BIT_OR_OR_T
:
1648 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1649 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
1654 static void decode_bit_sh_logic1(CPUTriCoreState
*env
, DisasContext
*ctx
)
1661 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1662 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1663 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1664 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1665 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1666 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1668 temp
= tcg_temp_new();
1671 case OPC2_32_BIT_SH_AND_T
:
1672 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1673 pos1
, pos2
, &tcg_gen_and_tl
);
1675 case OPC2_32_BIT_SH_ANDN_T
:
1676 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1677 pos1
, pos2
, &tcg_gen_andc_tl
);
1679 case OPC2_32_BIT_SH_NOR_T
:
1680 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1681 pos1
, pos2
, &tcg_gen_nor_tl
);
1683 case OPC2_32_BIT_SH_OR_T
:
1684 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1685 pos1
, pos2
, &tcg_gen_or_tl
);
1688 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
1689 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
1690 tcg_temp_free(temp
);
1693 static void decode_bit_sh_logic2(CPUTriCoreState
*env
, DisasContext
*ctx
)
1700 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1701 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1702 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1703 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1704 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1705 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1707 temp
= tcg_temp_new();
1710 case OPC2_32_BIT_SH_NAND_T
:
1711 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
1712 pos1
, pos2
, &tcg_gen_nand_tl
);
1714 case OPC2_32_BIT_SH_ORN_T
:
1715 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1716 pos1
, pos2
, &tcg_gen_orc_tl
);
1718 case OPC2_32_BIT_SH_XNOR_T
:
1719 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1720 pos1
, pos2
, &tcg_gen_eqv_tl
);
1722 case OPC2_32_BIT_SH_XOR_T
:
1723 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1724 pos1
, pos2
, &tcg_gen_xor_tl
);
1727 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
1728 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
1729 tcg_temp_free(temp
);
1735 static void decode_bo_addrmode_post_pre_base(CPUTriCoreState
*env
,
1743 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
1744 r2
= MASK_OP_BO_S2(ctx
->opcode
);
1745 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
1746 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
1749 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
1750 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
1751 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
1752 /* instruction to access the cache */
1754 case OPC2_32_BO_CACHEA_WI_POSTINC
:
1755 case OPC2_32_BO_CACHEA_W_POSTINC
:
1756 case OPC2_32_BO_CACHEA_I_POSTINC
:
1757 /* instruction to access the cache, but we still need to handle
1758 the addressing mode */
1759 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
1761 case OPC2_32_BO_CACHEA_WI_PREINC
:
1762 case OPC2_32_BO_CACHEA_W_PREINC
:
1763 case OPC2_32_BO_CACHEA_I_PREINC
:
1764 /* instruction to access the cache, but we still need to handle
1765 the addressing mode */
1766 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
1768 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
1769 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
1770 /* TODO: Raise illegal opcode trap,
1771 if tricore_feature(TRICORE_FEATURE_13) */
1773 case OPC2_32_BO_CACHEI_W_POSTINC
:
1774 case OPC2_32_BO_CACHEI_WI_POSTINC
:
1775 if (!tricore_feature(env
, TRICORE_FEATURE_13
)) {
1776 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
1777 } /* TODO: else raise illegal opcode trap */
1779 case OPC2_32_BO_CACHEI_W_PREINC
:
1780 case OPC2_32_BO_CACHEI_WI_PREINC
:
1781 if (!tricore_feature(env
, TRICORE_FEATURE_13
)) {
1782 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
1783 } /* TODO: else raise illegal opcode trap */
1785 case OPC2_32_BO_ST_A_SHORTOFF
:
1786 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
1788 case OPC2_32_BO_ST_A_POSTINC
:
1789 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
1791 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
1793 case OPC2_32_BO_ST_A_PREINC
:
1794 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
1796 case OPC2_32_BO_ST_B_SHORTOFF
:
1797 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
1799 case OPC2_32_BO_ST_B_POSTINC
:
1800 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
1802 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
1804 case OPC2_32_BO_ST_B_PREINC
:
1805 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
1807 case OPC2_32_BO_ST_D_SHORTOFF
:
1808 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
1811 case OPC2_32_BO_ST_D_POSTINC
:
1812 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
1813 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
1815 case OPC2_32_BO_ST_D_PREINC
:
1816 temp
= tcg_temp_new();
1817 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
1818 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
1819 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
1820 tcg_temp_free(temp
);
1822 case OPC2_32_BO_ST_DA_SHORTOFF
:
1823 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
1826 case OPC2_32_BO_ST_DA_POSTINC
:
1827 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
1828 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
1830 case OPC2_32_BO_ST_DA_PREINC
:
1831 temp
= tcg_temp_new();
1832 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
1833 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
1834 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
1835 tcg_temp_free(temp
);
1837 case OPC2_32_BO_ST_H_SHORTOFF
:
1838 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
1840 case OPC2_32_BO_ST_H_POSTINC
:
1841 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
1843 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
1845 case OPC2_32_BO_ST_H_PREINC
:
1846 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
1848 case OPC2_32_BO_ST_Q_SHORTOFF
:
1849 temp
= tcg_temp_new();
1850 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
1851 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
1852 tcg_temp_free(temp
);
1854 case OPC2_32_BO_ST_Q_POSTINC
:
1855 temp
= tcg_temp_new();
1856 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
1857 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
1859 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
1860 tcg_temp_free(temp
);
1862 case OPC2_32_BO_ST_Q_PREINC
:
1863 temp
= tcg_temp_new();
1864 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
1865 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
1866 tcg_temp_free(temp
);
1868 case OPC2_32_BO_ST_W_SHORTOFF
:
1869 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
1871 case OPC2_32_BO_ST_W_POSTINC
:
1872 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
1874 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
1876 case OPC2_32_BO_ST_W_PREINC
:
1877 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
1882 static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState
*env
,
1888 TCGv temp
, temp2
, temp3
;
1890 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
1891 r2
= MASK_OP_BO_S2(ctx
->opcode
);
1892 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
1893 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
1895 temp
= tcg_temp_new();
1896 temp2
= tcg_temp_new();
1897 temp3
= tcg_const_i32(off10
);
1899 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
1900 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
1903 case OPC2_32_BO_CACHEA_WI_BR
:
1904 case OPC2_32_BO_CACHEA_W_BR
:
1905 case OPC2_32_BO_CACHEA_I_BR
:
1906 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
1908 case OPC2_32_BO_CACHEA_WI_CIRC
:
1909 case OPC2_32_BO_CACHEA_W_CIRC
:
1910 case OPC2_32_BO_CACHEA_I_CIRC
:
1911 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
1913 case OPC2_32_BO_ST_A_BR
:
1914 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
1915 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
1917 case OPC2_32_BO_ST_A_CIRC
:
1918 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
1919 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
1921 case OPC2_32_BO_ST_B_BR
:
1922 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
1923 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
1925 case OPC2_32_BO_ST_B_CIRC
:
1926 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
1927 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
1929 case OPC2_32_BO_ST_D_BR
:
1930 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
1931 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
1933 case OPC2_32_BO_ST_D_CIRC
:
1934 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
1935 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
1936 tcg_gen_addi_tl(temp
, temp
, 4);
1937 tcg_gen_rem_tl(temp
, temp
, temp2
);
1938 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
1939 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
1940 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
1942 case OPC2_32_BO_ST_DA_BR
:
1943 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
1944 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
1946 case OPC2_32_BO_ST_DA_CIRC
:
1947 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
1948 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
1949 tcg_gen_addi_tl(temp
, temp
, 4);
1950 tcg_gen_rem_tl(temp
, temp
, temp2
);
1951 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
1952 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
1953 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
1955 case OPC2_32_BO_ST_H_BR
:
1956 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
1957 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
1959 case OPC2_32_BO_ST_H_CIRC
:
1960 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
1961 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
1963 case OPC2_32_BO_ST_Q_BR
:
1964 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
1965 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
1966 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
1968 case OPC2_32_BO_ST_Q_CIRC
:
1969 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
1970 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
1971 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
1973 case OPC2_32_BO_ST_W_BR
:
1974 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
1975 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
1977 case OPC2_32_BO_ST_W_CIRC
:
1978 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
1979 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
1982 tcg_temp_free(temp
);
1983 tcg_temp_free(temp2
);
1984 tcg_temp_free(temp3
);
1987 static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState
*env
,
1995 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
1996 r2
= MASK_OP_BO_S2(ctx
->opcode
);
1997 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
1998 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
2001 case OPC2_32_BO_LD_A_SHORTOFF
:
2002 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
2004 case OPC2_32_BO_LD_A_POSTINC
:
2005 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2007 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2009 case OPC2_32_BO_LD_A_PREINC
:
2010 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
2012 case OPC2_32_BO_LD_B_SHORTOFF
:
2013 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
2015 case OPC2_32_BO_LD_B_POSTINC
:
2016 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2018 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2020 case OPC2_32_BO_LD_B_PREINC
:
2021 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
2023 case OPC2_32_BO_LD_BU_SHORTOFF
:
2024 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
2026 case OPC2_32_BO_LD_BU_POSTINC
:
2027 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2029 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2031 case OPC2_32_BO_LD_BU_PREINC
:
2032 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
2034 case OPC2_32_BO_LD_D_SHORTOFF
:
2035 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
2038 case OPC2_32_BO_LD_D_POSTINC
:
2039 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
2040 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2042 case OPC2_32_BO_LD_D_PREINC
:
2043 temp
= tcg_temp_new();
2044 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2045 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
2046 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
2047 tcg_temp_free(temp
);
2049 case OPC2_32_BO_LD_DA_SHORTOFF
:
2050 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
2053 case OPC2_32_BO_LD_DA_POSTINC
:
2054 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
2055 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2057 case OPC2_32_BO_LD_DA_PREINC
:
2058 temp
= tcg_temp_new();
2059 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2060 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
2061 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
2062 tcg_temp_free(temp
);
2064 case OPC2_32_BO_LD_H_SHORTOFF
:
2065 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
2067 case OPC2_32_BO_LD_H_POSTINC
:
2068 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2070 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2072 case OPC2_32_BO_LD_H_PREINC
:
2073 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
2075 case OPC2_32_BO_LD_HU_SHORTOFF
:
2076 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2078 case OPC2_32_BO_LD_HU_POSTINC
:
2079 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2081 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2083 case OPC2_32_BO_LD_HU_PREINC
:
2084 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2086 case OPC2_32_BO_LD_Q_SHORTOFF
:
2087 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2088 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
2090 case OPC2_32_BO_LD_Q_POSTINC
:
2091 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2093 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
2094 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2096 case OPC2_32_BO_LD_Q_PREINC
:
2097 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2098 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
2100 case OPC2_32_BO_LD_W_SHORTOFF
:
2101 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
2103 case OPC2_32_BO_LD_W_POSTINC
:
2104 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2106 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2108 case OPC2_32_BO_LD_W_PREINC
:
2109 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
2114 static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState
*env
,
2121 TCGv temp
, temp2
, temp3
;
2123 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
2124 r2
= MASK_OP_BO_S2(ctx
->opcode
);
2125 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
2126 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
2128 temp
= tcg_temp_new();
2129 temp2
= tcg_temp_new();
2130 temp3
= tcg_const_i32(off10
);
2132 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
2133 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
2137 case OPC2_32_BO_LD_A_BR
:
2138 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2139 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2141 case OPC2_32_BO_LD_A_CIRC
:
2142 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2143 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2145 case OPC2_32_BO_LD_B_BR
:
2146 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
2147 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2149 case OPC2_32_BO_LD_B_CIRC
:
2150 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
2151 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2153 case OPC2_32_BO_LD_BU_BR
:
2154 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
2155 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2157 case OPC2_32_BO_LD_BU_CIRC
:
2158 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
2159 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2161 case OPC2_32_BO_LD_D_BR
:
2162 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
2163 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2165 case OPC2_32_BO_LD_D_CIRC
:
2166 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2167 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
2168 tcg_gen_addi_tl(temp
, temp
, 4);
2169 tcg_gen_rem_tl(temp
, temp
, temp2
);
2170 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
2171 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
2172 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2174 case OPC2_32_BO_LD_DA_BR
:
2175 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
2176 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2178 case OPC2_32_BO_LD_DA_CIRC
:
2179 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2180 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
2181 tcg_gen_addi_tl(temp
, temp
, 4);
2182 tcg_gen_rem_tl(temp
, temp
, temp2
);
2183 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
2184 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
2185 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2187 case OPC2_32_BO_LD_H_BR
:
2188 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
2189 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2191 case OPC2_32_BO_LD_H_CIRC
:
2192 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
2193 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2195 case OPC2_32_BO_LD_HU_BR
:
2196 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
2197 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2199 case OPC2_32_BO_LD_HU_CIRC
:
2200 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
2201 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2203 case OPC2_32_BO_LD_Q_BR
:
2204 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
2205 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
2206 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2208 case OPC2_32_BO_LD_Q_CIRC
:
2209 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
2210 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
2211 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2213 case OPC2_32_BO_LD_W_BR
:
2214 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2215 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2217 case OPC2_32_BO_LD_W_CIRC
:
2218 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2219 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2222 tcg_temp_free(temp
);
2223 tcg_temp_free(temp2
);
2224 tcg_temp_free(temp3
);
2227 static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState
*env
,
2236 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
2237 r2
= MASK_OP_BO_S2(ctx
->opcode
);
2238 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
2239 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
2242 temp
= tcg_temp_new();
2243 temp2
= tcg_temp_new();
2246 case OPC2_32_BO_LDLCX_SHORTOFF
:
2247 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2248 gen_helper_ldlcx(cpu_env
, temp
);
2250 case OPC2_32_BO_LDMST_SHORTOFF
:
2251 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2252 gen_ldmst(ctx
, r1
, temp
);
2254 case OPC2_32_BO_LDMST_POSTINC
:
2255 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
2256 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2258 case OPC2_32_BO_LDMST_PREINC
:
2259 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2260 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
2262 case OPC2_32_BO_LDUCX_SHORTOFF
:
2263 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2264 gen_helper_lducx(cpu_env
, temp
);
2266 case OPC2_32_BO_LEA_SHORTOFF
:
2267 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
2269 case OPC2_32_BO_STLCX_SHORTOFF
:
2270 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2271 gen_helper_stlcx(cpu_env
, temp
);
2273 case OPC2_32_BO_STUCX_SHORTOFF
:
2274 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2275 gen_helper_stucx(cpu_env
, temp
);
2277 case OPC2_32_BO_SWAP_W_SHORTOFF
:
2278 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2279 gen_swap(ctx
, r1
, temp
);
2281 case OPC2_32_BO_SWAP_W_POSTINC
:
2282 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
2283 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2285 case OPC2_32_BO_SWAP_W_PREINC
:
2286 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2287 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
2290 tcg_temp_free(temp
);
2291 tcg_temp_free(temp2
);
2294 static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState
*env
,
2301 TCGv temp
, temp2
, temp3
;
2303 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
2304 r2
= MASK_OP_BO_S2(ctx
->opcode
);
2305 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
2306 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
2308 temp
= tcg_temp_new();
2309 temp2
= tcg_temp_new();
2310 temp3
= tcg_const_i32(off10
);
2312 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
2313 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
2316 case OPC2_32_BO_LDMST_BR
:
2317 gen_ldmst(ctx
, r1
, temp2
);
2318 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2320 case OPC2_32_BO_LDMST_CIRC
:
2321 gen_ldmst(ctx
, r1
, temp2
);
2322 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2324 case OPC2_32_BO_SWAP_W_BR
:
2325 gen_swap(ctx
, r1
, temp2
);
2326 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2328 case OPC2_32_BO_SWAP_W_CIRC
:
2329 gen_swap(ctx
, r1
, temp2
);
2330 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2333 tcg_temp_free(temp
);
2334 tcg_temp_free(temp2
);
2335 tcg_temp_free(temp3
);
2338 static void decode_bol_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int32_t op1
)
2344 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
2345 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
2346 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
2349 case OPC1_32_BOL_LD_A_LONGOFF
:
2350 temp
= tcg_temp_new();
2351 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
2352 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
2353 tcg_temp_free(temp
);
2355 case OPC1_32_BOL_LD_W_LONFOFF
:
2356 temp
= tcg_temp_new();
2357 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
2358 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
2359 tcg_temp_free(temp
);
2361 case OPC1_32_BOL_LEA_LONGOFF
:
2362 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
2364 case OPC1_32_BOL_ST_A_LONGOFF
:
2365 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
2366 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
2368 /* raise illegal opcode trap */
2371 case OPC1_32_BOL_ST_W_LONGOFF
:
2372 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
2378 static void decode_32Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
2387 op1
= MASK_OP_MAJOR(ctx
->opcode
);
2389 /* handle JNZ.T opcode only being 6 bit long */
2390 if (unlikely((op1
& 0x3f) == OPCM_32_BRN_JTT
)) {
2391 op1
= OPCM_32_BRN_JTT
;
2396 case OPCM_32_ABS_LDW
:
2397 decode_abs_ldw(env
, ctx
);
2399 case OPCM_32_ABS_LDB
:
2400 decode_abs_ldb(env
, ctx
);
2402 case OPCM_32_ABS_LDMST_SWAP
:
2403 decode_abs_ldst_swap(env
, ctx
);
2405 case OPCM_32_ABS_LDST_CONTEXT
:
2406 decode_abs_ldst_context(env
, ctx
);
2408 case OPCM_32_ABS_STORE
:
2409 decode_abs_store(env
, ctx
);
2411 case OPCM_32_ABS_STOREB_H
:
2412 decode_abs_storeb_h(env
, ctx
);
2414 case OPC1_32_ABS_STOREQ
:
2415 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2416 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2417 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2418 temp2
= tcg_temp_new();
2420 tcg_gen_shri_tl(temp2
, cpu_gpr_d
[r1
], 16);
2421 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_LEUW
);
2423 tcg_temp_free(temp2
);
2424 tcg_temp_free(temp
);
2426 case OPC1_32_ABS_LD_Q
:
2427 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2428 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2429 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2431 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
2432 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
2434 tcg_temp_free(temp
);
2436 case OPC1_32_ABS_LEA
:
2437 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2438 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2439 tcg_gen_movi_tl(cpu_gpr_a
[r1
], EA_ABS_FORMAT(address
));
2442 case OPC1_32_ABSB_ST_T
:
2443 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2444 b
= MASK_OP_ABSB_B(ctx
->opcode
);
2445 bpos
= MASK_OP_ABSB_BPOS(ctx
->opcode
);
2447 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2448 temp2
= tcg_temp_new();
2450 tcg_gen_qemu_ld_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
2451 tcg_gen_andi_tl(temp2
, temp2
, ~(0x1u
<< bpos
));
2452 tcg_gen_ori_tl(temp2
, temp2
, (b
<< bpos
));
2453 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
2455 tcg_temp_free(temp
);
2456 tcg_temp_free(temp2
);
2459 case OPC1_32_B_CALL
:
2460 case OPC1_32_B_CALLA
:
2465 address
= MASK_OP_B_DISP24(ctx
->opcode
);
2466 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
2469 case OPCM_32_BIT_ANDACC
:
2470 decode_bit_andacc(env
, ctx
);
2472 case OPCM_32_BIT_LOGICAL_T1
:
2473 decode_bit_logical_t(env
, ctx
);
2475 case OPCM_32_BIT_INSERT
:
2476 decode_bit_insert(env
, ctx
);
2478 case OPCM_32_BIT_LOGICAL_T2
:
2479 decode_bit_logical_t2(env
, ctx
);
2481 case OPCM_32_BIT_ORAND
:
2482 decode_bit_orand(env
, ctx
);
2484 case OPCM_32_BIT_SH_LOGIC1
:
2485 decode_bit_sh_logic1(env
, ctx
);
2487 case OPCM_32_BIT_SH_LOGIC2
:
2488 decode_bit_sh_logic2(env
, ctx
);
2491 case OPCM_32_BO_ADDRMODE_POST_PRE_BASE
:
2492 decode_bo_addrmode_post_pre_base(env
, ctx
);
2494 case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR
:
2495 decode_bo_addrmode_bitreverse_circular(env
, ctx
);
2497 case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE
:
2498 decode_bo_addrmode_ld_post_pre_base(env
, ctx
);
2500 case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR
:
2501 decode_bo_addrmode_ld_bitreverse_circular(env
, ctx
);
2503 case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE
:
2504 decode_bo_addrmode_stctx_post_pre_base(env
, ctx
);
2506 case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR
:
2507 decode_bo_addrmode_ldmst_bitreverse_circular(env
, ctx
);
2510 case OPC1_32_BOL_LD_A_LONGOFF
:
2511 case OPC1_32_BOL_LD_W_LONFOFF
:
2512 case OPC1_32_BOL_LEA_LONGOFF
:
2513 case OPC1_32_BOL_ST_W_LONGOFF
:
2514 case OPC1_32_BOL_ST_A_LONGOFF
:
2515 decode_bol_opc(env
, ctx
, op1
);
2518 case OPCM_32_BRC_EQ_NEQ
:
2519 case OPCM_32_BRC_GE
:
2520 case OPCM_32_BRC_JLT
:
2521 case OPCM_32_BRC_JNE
:
2522 const4
= MASK_OP_BRC_CONST4_SEXT(ctx
->opcode
);
2523 address
= MASK_OP_BRC_DISP15_SEXT(ctx
->opcode
);
2524 r1
= MASK_OP_BRC_S1(ctx
->opcode
);
2525 gen_compute_branch(ctx
, op1
, r1
, 0, const4
, address
);
2528 case OPCM_32_BRN_JTT
:
2529 address
= MASK_OP_BRN_DISP15_SEXT(ctx
->opcode
);
2530 r1
= MASK_OP_BRN_S1(ctx
->opcode
);
2531 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
2536 static void decode_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int *is_branch
)
2538 /* 16-Bit Instruction */
2539 if ((ctx
->opcode
& 0x1) == 0) {
2540 ctx
->next_pc
= ctx
->pc
+ 2;
2541 decode_16Bit_opc(env
, ctx
);
2542 /* 32-Bit Instruction */
2544 ctx
->next_pc
= ctx
->pc
+ 4;
2545 decode_32Bit_opc(env
, ctx
);
/*
 * Core translation loop: decode guest instructions starting at tb->pc and
 * emit TCG ops until the basic block ends (ctx.bstate leaves BS_NONE), the
 * op buffer is nearly full, or single-stepping forces an exit after one
 * instruction.  When search_pc is true the caller is re-translating to map
 * a host PC back to a guest PC, so tb->size/icount are not updated.
 */
static inline void
gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
                               int search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUTriCoreState *env = &cpu->env;
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;
    uint16_t *gen_opc_end;

    if (search_pc) {
        qemu_log("search pc %d\n", search_pc);
    }

    num_insns = 0;
    pc_start = tb->pc;
    /* leave headroom so one more instruction always fits in the op buffer */
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.saved_pc = -1;
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.bstate = BS_NONE;
    ctx.mem_idx = cpu_mmu_index(env);

    tcg_clear_temp_count();
    gen_tb_start();
    while (ctx.bstate == BS_NONE) {
        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx, 0);

        num_insns++;

        /* op buffer nearly full: stop and resume at next_pc */
        if (tcg_ctx.gen_opc_ptr >= gen_opc_end) {
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        if (singlestep) {
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        ctx.pc = ctx.next_pc;
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        printf("done_generating search pc\n");
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

    /* report any TCG temporaries a decode path forgot to free */
    if (tcg_check_temp_count()) {
        printf("LEAK at %08x\n", env->PC);
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
/* Translate a TB normally (tb->size/icount are filled in). */
void gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
}
/* Re-translate a TB in search_pc mode to recover a guest PC. */
void gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
}
/* Restore the guest PC for the instruction at opcode-buffer index pc_pos. */
void
restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos)
{
    env->PC = tcg_ctx.gen_opc_pc[pc_pos];
}
/* Put the CPU state into its architectural reset configuration. */
void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
    env->PSW = 0xb80;
}
/* Create TCG globals for the core special function registers (CSFRs). */
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
                                  offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, ICR), "ICR");
}
2657 void tricore_tcg_init(void)
2664 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
2666 for (i
= 0 ; i
< 16 ; i
++) {
2667 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
2668 offsetof(CPUTriCoreState
, gpr_a
[i
]),
2671 for (i
= 0 ; i
< 16 ; i
++) {
2672 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
2673 offsetof(CPUTriCoreState
, gpr_d
[i
]),
2676 tricore_tcg_init_csfr();
2677 /* init PSW flag cache */
2678 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
2679 offsetof(CPUTriCoreState
, PSW_USB_C
),
2681 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
2682 offsetof(CPUTriCoreState
, PSW_USB_V
),
2684 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
2685 offsetof(CPUTriCoreState
, PSW_USB_SV
),
2687 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
2688 offsetof(CPUTriCoreState
, PSW_USB_AV
),
2690 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
2691 offsetof(CPUTriCoreState
, PSW_USB_SAV
),