/*
 * TriCore emulation for qemu: main translation routines.
 *
 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "tricore-opcodes.h"
static TCGv cpu_gpr_a[16];
static TCGv cpu_gpr_d[16];

static TCGv cpu_PSW_C;
static TCGv cpu_PSW_V;
static TCGv cpu_PSW_SV;
static TCGv cpu_PSW_AV;
static TCGv cpu_PSW_SAV;

static TCGv_ptr cpu_env;

#include "exec/gen-icount.h"
static const char *regnames_a[] = {
    "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "a8", "a9", "sp", "a11",
    "a12", "a13", "a14", "a15",
};

static const char *regnames_d[] = {
    "d0", "d1", "d2", "d3", "d4", "d5",
    "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15",
};
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc, saved_pc, next_pc;
    int singlestep_enabled;
    /* Routine used to access memory */
    int mem_idx;
    uint32_t hflags, saved_hflags;
} DisasContext;
void tricore_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    TriCoreCPU *cpu = TRICORE_CPU(cs);
    CPUTriCoreState *env = &cpu->env;
    int i;

    cpu_fprintf(f, "PC=%08x\n", env->PC);
    for (i = 0; i < 16; ++i) {
        cpu_fprintf(f, "GPR A%02d:", i);
        cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames_a[i], env->gpr_a[i]);
    }
    for (i = 0; i < 16; ++i) {
        cpu_fprintf(f, "GPR D%02d:", i);
        cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames_d[i], env->gpr_d[i]);
    }
}
/*
 * Functions to generate micro-ops
 */

/* Macros for generating helpers */

#define gen_helper_1arg(name, arg) do {                       \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                 \
    gen_helper_##name(cpu_env, helper_tmp);                   \
    tcg_temp_free_i32(helper_tmp);                            \
    } while (0)

#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
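
/*
 * Note: EA_ABS_FORMAT above rebuilds an absolute effective address from
 * the 18-bit off18 field (bits [17:14] become address bits [31:28], the
 * low 14 bits are kept), and EA_B_ABSOLUT rebuilds the target of an
 * absolute branch from its 24-bit displacement (upper 4 bits to [31:28],
 * the remaining 20 bits shifted left by one for halfword alignment).
 */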
/* Functions for load/save to/from memory */

static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, con);
    tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
    tcg_temp_free(temp);
}

static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, con);
    tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
    tcg_temp_free(temp);
}

static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    tcg_gen_concat_i32_i64(temp, rl, rh);
    tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);

    tcg_temp_free_i64(temp);
}

static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, base, con);
    gen_st_2regs_64(rh, rl, temp, ctx);
    tcg_temp_free(temp);
}

static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
    /* write back to two 32 bit regs */
    tcg_gen_extr_i64_i32(rl, rh, temp);

    tcg_temp_free_i64(temp);
}
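
/*
 * Note: 64-bit data lives in a pair of adjacent 32-bit GPRs; the two
 * halves are glued together with concat_i32_i64 before a single 64-bit
 * store and split back with extr_i64_i32 after a single 64-bit load.
 */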
static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, base, con);
    gen_ld_2regs_64(rh, rl, temp, ctx);
    tcg_temp_free(temp);
}

static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, off);
    tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
    tcg_gen_mov_tl(r2, temp);
    tcg_temp_free(temp);
}

static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, off);
    tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
    tcg_gen_mov_tl(r2, temp);
    tcg_temp_free(temp);
}
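
/*
 * Note: for the pre-increment addressing helpers above the access uses
 * base + offset as effective address and the incremented address is then
 * written back into the base (address) register.
 */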
/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    /* temp = M(EA, word) */
    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    /* temp = temp & ~E[a][63:32] */
    tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
    /* temp2 = E[a][31:0] & E[a][63:32]; */
    tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
    /* temp = temp | temp2; */
    tcg_gen_or_tl(temp, temp, temp2);
    /* M(EA, word) = temp; */
    tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}

/* tmp = M(EA, word);
   M(EA, word) = D[a];
   D[a] = tmp; */
static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);

    tcg_temp_free(temp);
}
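
/*
 * Note: gen_ldmst and gen_swap above emit a separate load followed by a
 * store, i.e. the read-modify-write of the guest instruction is not
 * performed atomically here.
 */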
/* Functions for arithmetic instructions */

static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    /* Addition and set V/SV bits */
    tcg_gen_add_tl(result, r1, r2);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(t0);
}
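
/*
 * Note: in gen_add_d above, PSW.V is computed by the usual two's
 * complement rule (result ^ r1) & ~(r1 ^ r2), of which only bit 31 is
 * meaningful; PSW.AV is result[31] ^ result[30] (result + result, then
 * xor with result), and SV/SAV simply OR-accumulate V/AV as sticky bits.
 */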
static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
{
    TCGv temp = tcg_const_i32(r2);
    gen_add_d(ret, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_add_tl(result, r1, r2);
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_andc_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(result);
    tcg_temp_free(mask);
    tcg_temp_free(t0);
}

static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
                                 TCGv r3, TCGv r4)
{
    TCGv temp = tcg_const_i32(r2);
    gen_cond_add(cond, r1, temp, r3, r4);
    tcg_temp_free(temp);
}

static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free(result);
}

static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv high = tcg_temp_new();
    TCGv low = tcg_temp_new();

    tcg_gen_muls2_tl(low, high, r1, r2);
    tcg_gen_mov_tl(ret, low);
    tcg_gen_sari_tl(low, low, 31);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(high);
    tcg_temp_free(low);
}
static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
{
    TCGv sat_neg = tcg_const_i32(low);
    TCGv temp = tcg_const_i32(up);

    /* sat_neg = (arg < low) ? low : arg; */
    tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);
    /* ret = (sat_neg > up) ? up : sat_neg; */
    tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);

    tcg_temp_free(sat_neg);
    tcg_temp_free(temp);
}
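
/*
 * Note: saturation is done branch-free with two movcond operations,
 * first clamping against the lower bound, then against the upper bound.
 */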
static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
{
    TCGv temp = tcg_const_i32(up);
    /* sat_neg = (arg > up) ? up : arg; */
    tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
    tcg_temp_free(temp);
}

static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
{
    if (shift_count == -32) {
        tcg_gen_movi_tl(ret, 0);
    } else if (shift_count >= 0) {
        tcg_gen_shli_tl(ret, r1, shift_count);
    } else {
        tcg_gen_shri_tl(ret, r1, -shift_count);
    }
}

static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
{
    uint32_t msk, msk_start;
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv t_0 = tcg_const_i32(0);

    if (shift_count == 0) {
        /* Clear PSW.C and PSW.V */
        tcg_gen_movi_tl(cpu_PSW_C, 0);
        tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count == -32) {
        tcg_gen_mov_tl(cpu_PSW_C, r1);
        /* fill ret completely with sign bit */
        tcg_gen_sari_tl(ret, r1, 31);
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else if (shift_count > 0) {
        TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
        TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);

        msk_start = 32 - shift_count;
        msk = ((1 << shift_count) - 1) << msk_start;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
        tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
        tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
        tcg_gen_shli_tl(ret, r1, shift_count);

        tcg_temp_free(t_max);
        tcg_temp_free(t_min);
    } else {
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        msk = (1 << -shift_count) - 1;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        tcg_gen_sari_tl(ret, r1, -shift_count);
    }
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(t_0);
}
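
/*
 * Note: for a nonzero arithmetic shift count the bits shifted out of the
 * operand are collected into PSW.C via a mask, and for positive (left)
 * shifts PSW.V is set when the operand lies outside the range that
 * survives the shift without overflow (compared against
 * 0x7fffffff >> count and 0x80000000 >> count).
 */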
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_add_ssov(ret, cpu_env, r1, r2);
}

static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_ssov(ret, cpu_env, r1, r2);
}

static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv),
                               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(temp1, temp1, temp2);
    (*op2)(temp1, ret, temp1);

    tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);

    tcg_temp_free(temp1);
    tcg_temp_free(temp2);
}
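
/*
 * Note: the BIT helpers operate on single bits. The selected bits of r1
 * and r2 are shifted down to bit 0, combined with the given operation(s),
 * and the 1-bit result is written to bit 0 of the destination (deposit
 * for the two-op form above, andi with 0x1 for the one-op form below).
 */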
/* ret = r1[pos1] op1 r2[pos2]; */
static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(ret, temp1, temp2);

    tcg_gen_andi_tl(ret, ret, 0x1);

    tcg_temp_free(temp1);
    tcg_temp_free(temp2);
}

/* helpers for generating program flow micro-ops */

static inline void gen_save_pc(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_PC, pc);
}

static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = ctx->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
        likely(!ctx->singlestep_enabled)) {
        tcg_gen_goto_tb(n);
        gen_save_pc(dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        gen_save_pc(dest);
        if (ctx->singlestep_enabled) {
            /* raise exception debug */
        }
        tcg_gen_exit_tb(0);
    }
}
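
/*
 * Note: direct block chaining (goto_tb/exit_tb with a TB pointer) is only
 * used when the destination lies on the same guest page as the current TB
 * and single-stepping is disabled; otherwise the PC is written back and
 * the TB is left through a plain exit_tb(0).
 */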
static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
                                   TCGv r2, int16_t address)
{
    int jumpLabel;
    jumpLabel = gen_new_label();
    tcg_gen_brcond_tl(cond, r1, r2, jumpLabel);

    gen_goto_tb(ctx, 1, ctx->next_pc);

    gen_set_label(jumpLabel);
    gen_goto_tb(ctx, 0, ctx->pc + address * 2);
}

static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
                                    int r2, int16_t address)
{
    TCGv temp = tcg_const_i32(r2);
    gen_branch_cond(ctx, cond, r1, temp, address);
    tcg_temp_free(temp);
}

static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
{
    int l1;
    l1 = gen_new_label();

    tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
    gen_goto_tb(ctx, 1, ctx->pc + offset);
    gen_set_label(l1);
    gen_goto_tb(ctx, 0, ctx->next_pc);
}
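
/*
 * Note: LOOP decrements the selected address register and keeps branching
 * back to the loop body until the register wraps to -1, at which point
 * execution falls through to next_pc.
 */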
static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
                               int r2, int32_t constant, int32_t offset)
{
    /* SB-format jumps */
        gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
    case OPC1_16_SB_CALL:
        gen_helper_1arg(call, ctx->next_pc);
        gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
        gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], 0, offset);
        gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], 0, offset);
    /* SBC-format jumps */
    case OPC1_16_SBC_JEQ:
        gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant, offset);
    case OPC1_16_SBC_JNE:
        gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], constant, offset);
    /* SBRN-format jumps */
    case OPC1_16_SBRN_JZ_T:
        temp = tcg_temp_new();
        tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
        gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
    case OPC1_16_SBRN_JNZ_T:
        temp = tcg_temp_new();
        tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
        gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
    /* SBR-format jumps */
    case OPC1_16_SBR_JEQ:
        gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15],
                        offset);
    case OPC1_16_SBR_JNE:
        gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15],
                        offset);
    case OPC1_16_SBR_JNZ:
        gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], 0, offset);
    case OPC1_16_SBR_JNZ_A:
        gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset);
    case OPC1_16_SBR_JGEZ:
        gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], 0, offset);
    case OPC1_16_SBR_JGTZ:
        gen_branch_condi(ctx, TCG_COND_GT, cpu_gpr_d[r1], 0, offset);
    case OPC1_16_SBR_JLEZ:
        gen_branch_condi(ctx, TCG_COND_LE, cpu_gpr_d[r1], 0, offset);
    case OPC1_16_SBR_JLTZ:
        gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], 0, offset);
        gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], 0, offset);
    case OPC1_16_SBR_JZ_A:
        gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset);
    case OPC1_16_SBR_LOOP:
        gen_loop(ctx, r1, offset * 2 - 32);
    /* SR-format jumps */
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
        gen_helper_ret(cpu_env);
    case OPC1_32_B_CALLA:
        gen_helper_1arg(call, ctx->next_pc);
        gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
        tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
        gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
        tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
        gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
        printf("Branch Error at %x\n", ctx->pc);
    ctx->bstate = BS_BRANCH;
}
/*
 * Functions for decoding instructions
 */

static void decode_src_opc(DisasContext *ctx, int op1)
{
    r1 = MASK_OP_SRC_S1D(ctx->opcode);
    const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode);

    case OPC1_16_SRC_ADD:
        gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
    case OPC1_16_SRC_ADD_A15:
        gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[15], const4);
    case OPC1_16_SRC_ADD_15A:
        gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4);
    case OPC1_16_SRC_ADD_A:
        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4);
    case OPC1_16_SRC_CADD:
        gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
                      cpu_gpr_d[15]);
    case OPC1_16_SRC_CADDN:
        gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
                      cpu_gpr_d[15]);
    case OPC1_16_SRC_CMOV:
        temp = tcg_const_tl(0);
        temp2 = tcg_const_tl(const4);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           temp2, cpu_gpr_d[r1]);
        tcg_temp_free(temp2);
    case OPC1_16_SRC_CMOVN:
        temp = tcg_const_tl(0);
        temp2 = tcg_const_tl(const4);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           temp2, cpu_gpr_d[r1]);
        tcg_temp_free(temp2);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
                            const4);
        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
                            const4);
    case OPC1_16_SRC_MOV:
        tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
    case OPC1_16_SRC_MOV_A:
        const4 = MASK_OP_SRC_CONST4(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_a[r1], const4);
        gen_shi(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
    case OPC1_16_SRC_SHA:
        gen_shaci(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
}
static void decode_srr_opc(DisasContext *ctx, int op1)
{
    r1 = MASK_OP_SRR_S1D(ctx->opcode);
    r2 = MASK_OP_SRR_S2(ctx->opcode);

    case OPC1_16_SRR_ADD:
        gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
    case OPC1_16_SRR_ADD_A15:
        gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
    case OPC1_16_SRR_ADD_15A:
        gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
    case OPC1_16_SRR_ADD_A:
        tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
    case OPC1_16_SRR_ADDS:
        gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
    case OPC1_16_SRR_AND:
        tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
    case OPC1_16_SRR_CMOV:
        temp = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           cpu_gpr_d[r2], cpu_gpr_d[r1]);
    case OPC1_16_SRR_CMOVN:
        temp = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
                           cpu_gpr_d[r2], cpu_gpr_d[r1]);
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
    case OPC1_16_SRR_MOV:
        tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]);
    case OPC1_16_SRR_MOV_A:
        tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]);
    case OPC1_16_SRR_MOV_AA:
        tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]);
    case OPC1_16_SRR_MOV_D:
        tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]);
    case OPC1_16_SRR_MUL:
        gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
    case OPC1_16_SRR_SUB:
        gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
    case OPC1_16_SRR_SUB_A15B:
        gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
    case OPC1_16_SRR_SUB_15AB:
        gen_sub_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
    case OPC1_16_SRR_SUBS:
        gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
    case OPC1_16_SRR_XOR:
        tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
}
static void decode_ssr_opc(DisasContext *ctx, int op1)
{
    r1 = MASK_OP_SSR_S1(ctx->opcode);
    r2 = MASK_OP_SSR_S2(ctx->opcode);

    case OPC1_16_SSR_ST_A:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
    case OPC1_16_SSR_ST_A_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
    case OPC1_16_SSR_ST_B:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
    case OPC1_16_SSR_ST_B_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
    case OPC1_16_SSR_ST_H:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
    case OPC1_16_SSR_ST_H_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
    case OPC1_16_SSR_ST_W:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
    case OPC1_16_SSR_ST_W_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
}
static void decode_sc_opc(DisasContext *ctx, int op1)
{
    const16 = MASK_OP_SC_CONST8(ctx->opcode);

        tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
    case OPC1_16_SC_BISR:
        gen_helper_1arg(bisr, const16 & 0xff);
    case OPC1_16_SC_LD_A:
        gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
    case OPC1_16_SC_LD_W:
        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
        tcg_gen_movi_tl(cpu_gpr_d[15], const16);
        tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
    case OPC1_16_SC_ST_A:
        gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
    case OPC1_16_SC_ST_W:
        gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
    case OPC1_16_SC_SUB_A:
        tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16);
}
static void decode_slr_opc(DisasContext *ctx, int op1)
{
    r1 = MASK_OP_SLR_D(ctx->opcode);
    r2 = MASK_OP_SLR_S2(ctx->opcode);

    case OPC1_16_SLR_LD_A:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
    case OPC1_16_SLR_LD_A_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
    case OPC1_16_SLR_LD_BU:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
    case OPC1_16_SLR_LD_BU_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
    case OPC1_16_SLR_LD_H:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
    case OPC1_16_SLR_LD_H_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
    case OPC1_16_SLR_LD_W:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
    case OPC1_16_SLR_LD_W_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
}
static void decode_sro_opc(DisasContext *ctx, int op1)
{
    r2 = MASK_OP_SRO_S2(ctx->opcode);
    address = MASK_OP_SRO_OFF4(ctx->opcode);

    case OPC1_16_SRO_LD_A:
        gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
    case OPC1_16_SRO_LD_BU:
        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
    case OPC1_16_SRO_LD_H:
        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_LESW);
    case OPC1_16_SRO_LD_W:
        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
    case OPC1_16_SRO_ST_A:
        gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
    case OPC1_16_SRO_ST_B:
        gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
    case OPC1_16_SRO_ST_H:
        gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
    case OPC1_16_SRO_ST_W:
        gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
}
static void decode_sr_system(CPUTriCoreState *env, DisasContext *ctx)
{
    op2 = MASK_OP_SR_OP2(ctx->opcode);

        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
        gen_helper_rfe(cpu_env);
        ctx->bstate = BS_BRANCH;
    case OPC2_16_SR_DEBUG:
        /* raise EXCP_DEBUG */
}

static void decode_sr_accu(CPUTriCoreState *env, DisasContext *ctx)
{
    r1 = MASK_OP_SR_S1D(ctx->opcode);
    op2 = MASK_OP_SR_OP2(ctx->opcode);

    case OPC2_16_SR_RSUB:
        /* overflow only if r1 = -0x80000000 */
        temp = tcg_const_i32(-0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
        tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
        tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
        tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
        tcg_temp_free(temp);
    case OPC2_16_SR_SAT_B:
        gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80);
    case OPC2_16_SR_SAT_BU:
        gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xff);
    case OPC2_16_SR_SAT_H:
        gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7fff, -0x8000);
    case OPC2_16_SR_SAT_HU:
        gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xffff);
}
static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
{
    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle ADDSC.A opcode only being 6 bit long */
    if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) {
        op1 = OPC1_16_SRRS_ADDSC_A;
    }

    case OPC1_16_SRC_ADD:
    case OPC1_16_SRC_ADD_A15:
    case OPC1_16_SRC_ADD_15A:
    case OPC1_16_SRC_ADD_A:
    case OPC1_16_SRC_CADD:
    case OPC1_16_SRC_CADDN:
    case OPC1_16_SRC_CMOV:
    case OPC1_16_SRC_CMOVN:
    case OPC1_16_SRC_EQ:
    case OPC1_16_SRC_LT:
    case OPC1_16_SRC_MOV:
    case OPC1_16_SRC_MOV_A:
    case OPC1_16_SRC_SH:
    case OPC1_16_SRC_SHA:
        decode_src_opc(ctx, op1);
    case OPC1_16_SRR_ADD:
    case OPC1_16_SRR_ADD_A15:
    case OPC1_16_SRR_ADD_15A:
    case OPC1_16_SRR_ADD_A:
    case OPC1_16_SRR_ADDS:
    case OPC1_16_SRR_AND:
    case OPC1_16_SRR_CMOV:
    case OPC1_16_SRR_CMOVN:
    case OPC1_16_SRR_EQ:
    case OPC1_16_SRR_LT:
    case OPC1_16_SRR_MOV:
    case OPC1_16_SRR_MOV_A:
    case OPC1_16_SRR_MOV_AA:
    case OPC1_16_SRR_MOV_D:
    case OPC1_16_SRR_MUL:
    case OPC1_16_SRR_OR:
    case OPC1_16_SRR_SUB:
    case OPC1_16_SRR_SUB_A15B:
    case OPC1_16_SRR_SUB_15AB:
    case OPC1_16_SRR_SUBS:
    case OPC1_16_SRR_XOR:
        decode_srr_opc(ctx, op1);
    case OPC1_16_SSR_ST_A:
    case OPC1_16_SSR_ST_A_POSTINC:
    case OPC1_16_SSR_ST_B:
    case OPC1_16_SSR_ST_B_POSTINC:
    case OPC1_16_SSR_ST_H:
    case OPC1_16_SSR_ST_H_POSTINC:
    case OPC1_16_SSR_ST_W:
    case OPC1_16_SSR_ST_W_POSTINC:
        decode_ssr_opc(ctx, op1);
    case OPC1_16_SRRS_ADDSC_A:
        r2 = MASK_OP_SRRS_S2(ctx->opcode);
        r1 = MASK_OP_SRRS_S1D(ctx->opcode);
        const16 = MASK_OP_SRRS_N(ctx->opcode);
        temp = tcg_temp_new();
        tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16);
        tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
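        /*
         * Note: the ADDSC.A case above computes
         * A[r1] = A[r2] + (D[15] << n), with the scale factor n taken
         * from the opcode's n field.
         */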
    case OPC1_16_SLRO_LD_A:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
    case OPC1_16_SLRO_LD_BU:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
    case OPC1_16_SLRO_LD_H:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
    case OPC1_16_SLRO_LD_W:
        r1 = MASK_OP_SLRO_D(ctx->opcode);
        const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
    case OPC1_16_SB_CALL:
    case OPC1_16_SB_JNZ:
        address = MASK_OP_SB_DISP8_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, 0, address);
    case OPC1_16_SBC_JEQ:
    case OPC1_16_SBC_JNE:
        address = MASK_OP_SBC_DISP4(ctx->opcode);
        const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, const16, address);
    case OPC1_16_SBRN_JNZ_T:
    case OPC1_16_SBRN_JZ_T:
        address = MASK_OP_SBRN_DISP4(ctx->opcode);
        const16 = MASK_OP_SBRN_N(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, const16, address);
    case OPC1_16_SBR_JEQ:
    case OPC1_16_SBR_JGEZ:
    case OPC1_16_SBR_JGTZ:
    case OPC1_16_SBR_JLEZ:
    case OPC1_16_SBR_JLTZ:
    case OPC1_16_SBR_JNE:
    case OPC1_16_SBR_JNZ:
    case OPC1_16_SBR_JNZ_A:
    case OPC1_16_SBR_JZ:
    case OPC1_16_SBR_JZ_A:
    case OPC1_16_SBR_LOOP:
        r1 = MASK_OP_SBR_S2(ctx->opcode);
        address = MASK_OP_SBR_DISP4(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, address);
    case OPC1_16_SC_AND:
    case OPC1_16_SC_BISR:
    case OPC1_16_SC_LD_A:
    case OPC1_16_SC_LD_W:
    case OPC1_16_SC_MOV:
    case OPC1_16_SC_ST_A:
    case OPC1_16_SC_ST_W:
    case OPC1_16_SC_SUB_A:
        decode_sc_opc(ctx, op1);
    case OPC1_16_SLR_LD_A:
    case OPC1_16_SLR_LD_A_POSTINC:
    case OPC1_16_SLR_LD_BU:
    case OPC1_16_SLR_LD_BU_POSTINC:
    case OPC1_16_SLR_LD_H:
    case OPC1_16_SLR_LD_H_POSTINC:
    case OPC1_16_SLR_LD_W:
    case OPC1_16_SLR_LD_W_POSTINC:
        decode_slr_opc(ctx, op1);
    case OPC1_16_SRO_LD_A:
    case OPC1_16_SRO_LD_BU:
    case OPC1_16_SRO_LD_H:
    case OPC1_16_SRO_LD_W:
    case OPC1_16_SRO_ST_A:
    case OPC1_16_SRO_ST_B:
    case OPC1_16_SRO_ST_H:
    case OPC1_16_SRO_ST_W:
        decode_sro_opc(ctx, op1);
    case OPC1_16_SSRO_ST_A:
        r1 = MASK_OP_SSRO_S1(ctx->opcode);
        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
        gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
    case OPC1_16_SSRO_ST_B:
        r1 = MASK_OP_SSRO_S1(ctx->opcode);
        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
    case OPC1_16_SSRO_ST_H:
        r1 = MASK_OP_SSRO_S1(ctx->opcode);
        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
    case OPC1_16_SSRO_ST_W:
        r1 = MASK_OP_SSRO_S1(ctx->opcode);
        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
    case OPCM_16_SR_SYSTEM:
        decode_sr_system(env, ctx);
    case OPCM_16_SR_ACCU:
        decode_sr_accu(env, ctx);
        r1 = MASK_OP_SR_S1D(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, 0);
    case OPC1_16_SR_NOT:
        r1 = MASK_OP_SR_S1D(ctx->opcode);
        tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
}
/*
 * 32 bit instructions
 */

static void decode_abs_ldw(CPUTriCoreState *env, DisasContext *ctx)
{
    r1 = MASK_OP_ABS_S1D(ctx->opcode);
    address = MASK_OP_ABS_OFF18(ctx->opcode);
    op2 = MASK_OP_ABS_OP2(ctx->opcode);

    temp = tcg_const_i32(EA_ABS_FORMAT(address));

    case OPC2_32_ABS_LD_A:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
    case OPC2_32_ABS_LD_D:
        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
    case OPC2_32_ABS_LD_DA:
        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
    case OPC2_32_ABS_LD_W:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);

    tcg_temp_free(temp);
}
static void decode_abs_ldb(CPUTriCoreState *env, DisasContext *ctx)
{
    r1 = MASK_OP_ABS_S1D(ctx->opcode);
    address = MASK_OP_ABS_OFF18(ctx->opcode);
    op2 = MASK_OP_ABS_OP2(ctx->opcode);

    temp = tcg_const_i32(EA_ABS_FORMAT(address));

    case OPC2_32_ABS_LD_B:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB);
    case OPC2_32_ABS_LD_BU:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
    case OPC2_32_ABS_LD_H:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW);
    case OPC2_32_ABS_LD_HU:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);

    tcg_temp_free(temp);
}

static void decode_abs_ldst_swap(CPUTriCoreState *env, DisasContext *ctx)
{
    r1 = MASK_OP_ABS_S1D(ctx->opcode);
    address = MASK_OP_ABS_OFF18(ctx->opcode);
    op2 = MASK_OP_ABS_OP2(ctx->opcode);

    temp = tcg_const_i32(EA_ABS_FORMAT(address));

    case OPC2_32_ABS_LDMST:
        gen_ldmst(ctx, r1, temp);
    case OPC2_32_ABS_SWAP_W:
        gen_swap(ctx, r1, temp);

    tcg_temp_free(temp);
}
static void decode_abs_ldst_context(CPUTriCoreState *env, DisasContext *ctx)
{
    off18 = MASK_OP_ABS_OFF18(ctx->opcode);
    op2 = MASK_OP_ABS_OP2(ctx->opcode);

    case OPC2_32_ABS_LDLCX:
        gen_helper_1arg(ldlcx, EA_ABS_FORMAT(off18));
    case OPC2_32_ABS_LDUCX:
        gen_helper_1arg(lducx, EA_ABS_FORMAT(off18));
    case OPC2_32_ABS_STLCX:
        gen_helper_1arg(stlcx, EA_ABS_FORMAT(off18));
    case OPC2_32_ABS_STUCX:
        gen_helper_1arg(stucx, EA_ABS_FORMAT(off18));
}

static void decode_abs_store(CPUTriCoreState *env, DisasContext *ctx)
{
    r1 = MASK_OP_ABS_S1D(ctx->opcode);
    address = MASK_OP_ABS_OFF18(ctx->opcode);
    op2 = MASK_OP_ABS_OP2(ctx->opcode);

    temp = tcg_const_i32(EA_ABS_FORMAT(address));

    case OPC2_32_ABS_ST_A:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
    case OPC2_32_ABS_ST_D:
        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
    case OPC2_32_ABS_ST_DA:
        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
    case OPC2_32_ABS_ST_W:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);

    tcg_temp_free(temp);
}
static void decode_abs_storeb_h(CPUTriCoreState *env, DisasContext *ctx)
{
    r1 = MASK_OP_ABS_S1D(ctx->opcode);
    address = MASK_OP_ABS_OFF18(ctx->opcode);
    op2 = MASK_OP_ABS_OP2(ctx->opcode);

    temp = tcg_const_i32(EA_ABS_FORMAT(address));

    case OPC2_32_ABS_ST_B:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
    case OPC2_32_ABS_ST_H:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);

    tcg_temp_free(temp);
}
static void decode_bit_andacc(CPUTriCoreState *env, DisasContext *ctx)
{
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
    op2 = MASK_OP_BIT_OP2(ctx->opcode);

    case OPC2_32_BIT_AND_AND_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl, &tcg_gen_and_tl);
    case OPC2_32_BIT_AND_ANDN_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
    case OPC2_32_BIT_AND_NOR_T:
        if (TCG_TARGET_HAS_andc_i32) {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
        } else {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_and_tl);
        }
    case OPC2_32_BIT_AND_OR_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl, &tcg_gen_and_tl);
}
static void decode_bit_logical_t(CPUTriCoreState *env, DisasContext *ctx)
{
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
    op2 = MASK_OP_BIT_OP2(ctx->opcode);

    case OPC2_32_BIT_AND_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl);
    case OPC2_32_BIT_ANDN_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl);
    case OPC2_32_BIT_NOR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nor_tl);
    case OPC2_32_BIT_OR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl);
}
static void decode_bit_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    temp = tcg_temp_new();

    tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2);
    if (op2 == OPC2_32_BIT_INSN_T) {
        tcg_gen_not_tl(temp, temp);
    }
    tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
    tcg_temp_free(temp);
}
static void decode_bit_logical_t2(CPUTriCoreState *env, DisasContext *ctx)
{
    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    case OPC2_32_BIT_NAND_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nand_tl);
    case OPC2_32_BIT_ORN_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_orc_tl);
    case OPC2_32_BIT_XNOR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_eqv_tl);
    case OPC2_32_BIT_XOR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_xor_tl);
}
static void decode_bit_orand(CPUTriCoreState *env, DisasContext *ctx)
{
    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    case OPC2_32_BIT_OR_AND_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl);
    case OPC2_32_BIT_OR_ANDN_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
    case OPC2_32_BIT_OR_NOR_T:
        if (TCG_TARGET_HAS_orc_i32) {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
        } else {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl);
        }
    case OPC2_32_BIT_OR_OR_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl);
}
static void decode_bit_sh_logic1(CPUTriCoreState *env, DisasContext *ctx)
{
    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    temp = tcg_temp_new();

    case OPC2_32_BIT_SH_AND_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl);
    case OPC2_32_BIT_SH_ANDN_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl);
    case OPC2_32_BIT_SH_NOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nor_tl);
    case OPC2_32_BIT_SH_OR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl);

    tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
    tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
    tcg_temp_free(temp);
}
static void decode_bit_sh_logic2(CPUTriCoreState *env, DisasContext *ctx)
{
    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    temp = tcg_temp_new();

    case OPC2_32_BIT_SH_NAND_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nand_tl);
    case OPC2_32_BIT_SH_ORN_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_orc_tl);
    case OPC2_32_BIT_SH_XNOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_eqv_tl);
    case OPC2_32_BIT_SH_XOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_xor_tl);

    tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
    tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
    tcg_temp_free(temp);
}
static void decode_bo_addrmode_post_pre_base(CPUTriCoreState *env,
                                             DisasContext *ctx)
{
    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    case OPC2_32_BO_CACHEA_WI_SHORTOFF:
    case OPC2_32_BO_CACHEA_W_SHORTOFF:
    case OPC2_32_BO_CACHEA_I_SHORTOFF:
        /* instruction to access the cache */
    case OPC2_32_BO_CACHEA_WI_POSTINC:
    case OPC2_32_BO_CACHEA_W_POSTINC:
    case OPC2_32_BO_CACHEA_I_POSTINC:
        /* instruction to access the cache, but we still need to handle
           the addressing mode */
        tcg_gen_addi_tl(cpu_gpr_d[r2], cpu_gpr_d[r2], off10);
    case OPC2_32_BO_CACHEA_WI_PREINC:
    case OPC2_32_BO_CACHEA_W_PREINC:
    case OPC2_32_BO_CACHEA_I_PREINC:
        /* instruction to access the cache, but we still need to handle
           the addressing mode */
        tcg_gen_addi_tl(cpu_gpr_d[r2], cpu_gpr_d[r2], off10);
    case OPC2_32_BO_CACHEI_WI_SHORTOFF:
    case OPC2_32_BO_CACHEI_W_SHORTOFF:
        /* TODO: Raise illegal opcode trap,
           if tricore_feature(TRICORE_FEATURE_13) */
    case OPC2_32_BO_CACHEI_W_POSTINC:
    case OPC2_32_BO_CACHEI_WI_POSTINC:
        if (!tricore_feature(env, TRICORE_FEATURE_13)) {
            tcg_gen_addi_tl(cpu_gpr_d[r2], cpu_gpr_d[r2], off10);
        } /* TODO: else raise illegal opcode trap */
    case OPC2_32_BO_CACHEI_W_PREINC:
    case OPC2_32_BO_CACHEI_WI_PREINC:
        if (!tricore_feature(env, TRICORE_FEATURE_13)) {
            tcg_gen_addi_tl(cpu_gpr_d[r2], cpu_gpr_d[r2], off10);
        } /* TODO: else raise illegal opcode trap */
    case OPC2_32_BO_ST_A_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
    case OPC2_32_BO_ST_A_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LESL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_ST_A_PREINC:
        gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
    case OPC2_32_BO_ST_B_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
    case OPC2_32_BO_ST_B_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_UB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_ST_B_PREINC:
        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
    case OPC2_32_BO_ST_D_SHORTOFF:
        gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
                            off10, ctx);
    case OPC2_32_BO_ST_D_POSTINC:
        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_ST_D_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
    case OPC2_32_BO_ST_DA_SHORTOFF:
        gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
                            off10, ctx);
    case OPC2_32_BO_ST_DA_POSTINC:
        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_ST_DA_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
    case OPC2_32_BO_ST_H_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
    case OPC2_32_BO_ST_H_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_ST_H_PREINC:
        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
    case OPC2_32_BO_ST_Q_SHORTOFF:
        temp = tcg_temp_new();
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_temp_free(temp);
    case OPC2_32_BO_ST_Q_POSTINC:
        temp = tcg_temp_new();
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        tcg_temp_free(temp);
    case OPC2_32_BO_ST_Q_PREINC:
        temp = tcg_temp_new();
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_temp_free(temp);
    case OPC2_32_BO_ST_W_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
    case OPC2_32_BO_ST_W_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_ST_W_PREINC:
        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
}
static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState *env,
                                                   DisasContext *ctx)
{
    TCGv temp, temp2, temp3;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    temp3 = tcg_const_i32(off10);

    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);

    case OPC2_32_BO_CACHEA_WI_BR:
    case OPC2_32_BO_CACHEA_W_BR:
    case OPC2_32_BO_CACHEA_I_BR:
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
    case OPC2_32_BO_CACHEA_WI_CIRC:
    case OPC2_32_BO_CACHEA_W_CIRC:
    case OPC2_32_BO_CACHEA_I_CIRC:
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
    case OPC2_32_BO_ST_A_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
    case OPC2_32_BO_ST_A_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
    case OPC2_32_BO_ST_B_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
    case OPC2_32_BO_ST_B_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
    case OPC2_32_BO_ST_D_BR:
        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
    case OPC2_32_BO_ST_D_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
    case OPC2_32_BO_ST_DA_BR:
        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
    case OPC2_32_BO_ST_DA_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
    case OPC2_32_BO_ST_H_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
    case OPC2_32_BO_ST_H_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
    case OPC2_32_BO_ST_Q_BR:
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
    case OPC2_32_BO_ST_Q_CIRC:
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
    case OPC2_32_BO_ST_W_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
    case OPC2_32_BO_ST_W_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
}
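
/*
 * Note: for the bit-reverse and circular addressing modes the effective
 * address is A[b] plus the 16-bit index held in the low half of A[b+1];
 * the length of a circular buffer sits in the high half (see the 64-bit
 * cases above, which wrap the second word with a modulo). The
 * br_update/circ_update helpers are then responsible for advancing the
 * index field.
 */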
static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState *env,
                                                DisasContext *ctx)
{
    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    case OPC2_32_BO_LD_A_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
    case OPC2_32_BO_LD_A_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_LD_A_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
    case OPC2_32_BO_LD_B_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
    case OPC2_32_BO_LD_B_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_SB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_LD_B_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
    case OPC2_32_BO_LD_BU_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
    case OPC2_32_BO_LD_BU_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_UB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_LD_BU_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
    case OPC2_32_BO_LD_D_SHORTOFF:
        gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
                            off10, ctx);
    case OPC2_32_BO_LD_D_POSTINC:
        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_LD_D_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
    case OPC2_32_BO_LD_DA_SHORTOFF:
        gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
                            off10, ctx);
    case OPC2_32_BO_LD_DA_POSTINC:
        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_LD_DA_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
    case OPC2_32_BO_LD_H_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
    case OPC2_32_BO_LD_H_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LESW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_LD_H_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
    case OPC2_32_BO_LD_HU_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
    case OPC2_32_BO_LD_HU_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_LD_HU_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
    case OPC2_32_BO_LD_Q_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
    case OPC2_32_BO_LD_Q_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_LD_Q_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
    case OPC2_32_BO_LD_W_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
    case OPC2_32_BO_LD_W_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
    case OPC2_32_BO_LD_W_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
}
static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState *env,
                                                      DisasContext *ctx)
{
    uint32_t op2;
    uint32_t off10;
    int r1, r2;

    TCGv temp, temp2, temp3;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    temp3 = tcg_const_i32(off10);

    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);

    switch (op2) {
    case OPC2_32_BO_LD_A_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_A_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_B_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_B_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_BU_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_BU_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_D_BR:
        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_D_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_DA_BR:
        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_DA_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_H_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_H_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_HU_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_HU_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_Q_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_Q_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_W_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_W_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
}
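/*
 * BO-format context and atomic operations: LDLCX/LDUCX load and STLCX/STUCX
 * store the lower/upper CPU context through helpers, while LDMST
 * (load-modify-store), SWAP.W and LEA come in short-offset, post-increment
 * and pre-increment variants, mirroring the plain load/store decoders above.
 */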
static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState *env,
                                                   DisasContext *ctx)
{
    uint32_t op2;
    uint32_t off10;
    int r1, r2;

    TCGv temp, temp2;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_BO_LDLCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_ldlcx(cpu_env, temp);
        break;
    case OPC2_32_BO_LDMST_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_ldmst(ctx, r1, temp);
        break;
    case OPC2_32_BO_LDMST_POSTINC:
        gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LDMST_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
        break;
    case OPC2_32_BO_LDUCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_lducx(cpu_env, temp);
        break;
    case OPC2_32_BO_LEA_SHORTOFF:
        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_STLCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_stlcx(cpu_env, temp);
        break;
    case OPC2_32_BO_STUCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_stucx(cpu_env, temp);
        break;
    case OPC2_32_BO_SWAP_W_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_swap(ctx, r1, temp);
        break;
    case OPC2_32_BO_SWAP_W_POSTINC:
        gen_swap(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_SWAP_W_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_swap(ctx, r1, cpu_gpr_a[r2]);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
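/*
 * LDMST and SWAP.W with bit-reverse/circular addressing reuse the same
 * EA = A[r2] + zero_ext(A[r2+1][15:0]) computation and the same index-update
 * helpers as the load decoder above.
 */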
static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState *env,
                                                         DisasContext *ctx)
{
    uint32_t op2;
    uint32_t off10;
    int r1, r2;

    TCGv temp, temp2, temp3;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    temp3 = tcg_const_i32(off10);

    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);

    switch (op2) {
    case OPC2_32_BO_LDMST_BR:
        gen_ldmst(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LDMST_CIRC:
        gen_ldmst(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_SWAP_W_BR:
        gen_swap(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_SWAP_W_CIRC:
        gen_swap(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
}
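/*
 * Top-level decoder for 32-bit instructions: the major opcode selects either
 * a per-format sub-decoder (ABS, BIT, BO, ...) or, for a few simple
 * instructions such as ST.Q, LD.Q, LEA, ST.T and the B-format branches,
 * generates the code inline right here.
 */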
static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
{
    int op1;
    int32_t r1;
    int32_t address;
    int32_t b, bpos;
    TCGv temp, temp2;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    switch (op1) {
/* ABS-format */
    case OPCM_32_ABS_LDW:
        decode_abs_ldw(env, ctx);
        break;
    case OPCM_32_ABS_LDB:
        decode_abs_ldb(env, ctx);
        break;
    case OPCM_32_ABS_LDMST_SWAP:
        decode_abs_ldst_swap(env, ctx);
        break;
    case OPCM_32_ABS_LDST_CONTEXT:
        decode_abs_ldst_context(env, ctx);
        break;
    case OPCM_32_ABS_STORE:
        decode_abs_store(env, ctx);
        break;
    case OPCM_32_ABS_STOREB_H:
        decode_abs_storeb_h(env, ctx);
        break;
    case OPC1_32_ABS_STOREQ:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);

        tcg_temp_free(temp2);
        tcg_temp_free(temp);
        break;
    case OPC1_32_ABS_LD_Q:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));

        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);

        tcg_temp_free(temp);
        break;
    case OPC1_32_ABS_LEA:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
        break;
/* ABSB-format */
    case OPC1_32_ABSB_ST_T:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        b = MASK_OP_ABSB_B(ctx->opcode);
        bpos = MASK_OP_ABSB_BPOS(ctx->opcode);

        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
        tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
        tcg_gen_ori_tl(temp2, temp2, (b << bpos));
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
/* B-format */
    case OPC1_32_B_CALL:
    case OPC1_32_B_CALLA:
        address = MASK_OP_B_DISP24(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, 0, address);
        break;
/* Bit-format */
    case OPCM_32_BIT_ANDACC:
        decode_bit_andacc(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T1:
        decode_bit_logical_t(env, ctx);
        break;
    case OPCM_32_BIT_INSERT:
        decode_bit_insert(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T2:
        decode_bit_logical_t2(env, ctx);
        break;
    case OPCM_32_BIT_ORAND:
        decode_bit_orand(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC1:
        decode_bit_sh_logic1(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC2:
        decode_bit_sh_logic2(env, ctx);
        break;
/* BO-format */
    case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
        decode_bo_addrmode_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
        decode_bo_addrmode_ld_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ld_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
        decode_bo_addrmode_stctx_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx);
        break;
    }
}
static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
{
    /* 16-Bit Instruction */
    if ((ctx->opcode & 0x1) == 0) {
        ctx->next_pc = ctx->pc + 2;
        decode_16Bit_opc(env, ctx);
    /* 32-Bit Instruction */
    } else {
        ctx->next_pc = ctx->pc + 4;
        decode_32Bit_opc(env, ctx);
    }
}
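/*
 * Main translation loop: instructions are decoded one at a time until the
 * decoder ends the block (ctx.bstate leaves BS_NONE) or an early exit is
 * forced (TCG opcode buffer nearly full, or single-stepping); in the early
 * exit cases the next PC is saved so execution can resume there.
 */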
static void
gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
                               int search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUTriCoreState *env = &cpu->env;
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns = 0;
    uint16_t *gen_opc_end;

    if (search_pc) {
        qemu_log("search pc %d\n", search_pc);
    }

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    ctx.pc = pc_start;
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.bstate = BS_NONE;
    ctx.mem_idx = cpu_mmu_index(env);

    tcg_clear_temp_count();

    while (ctx.bstate == BS_NONE) {
        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx, 0);

        num_insns++;

        if (tcg_ctx.gen_opc_ptr >= gen_opc_end) {
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        if (singlestep) {
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        ctx.pc = ctx.next_pc;
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        printf("done_generating search pc\n");
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

    if (tcg_check_temp_count()) {
        printf("LEAK at %08x\n", env->PC);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
    }
}
void gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb,
                          int pc_pos)
{
    env->PC = tcg_ctx.gen_opc_pc[pc_pos];
}

void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
}
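/*
 * Expose the core special function registers (PCXI, PSW, PC, ICR) as TCG
 * globals backed by their fields in CPUTriCoreState, so generated code can
 * read and write them directly.
 */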
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
                                  offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, ICR), "ICR");
}
void tricore_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* reg init */
    for (i = 0 ; i < 16 ; i++) {
        cpu_gpr_a[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUTriCoreState, gpr_a[i]),
                                          regnames_a[i]);
    }
    for (i = 0 ; i < 16 ; i++) {
        cpu_gpr_d[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUTriCoreState, gpr_d[i]),
                                          regnames_d[i]);
    }
    tricore_tcg_init_csfr();
    /* init PSW flag cache */
    cpu_PSW_C = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUTriCoreState, PSW_USB_C),
                                   "PSW_C");
    cpu_PSW_V = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUTriCoreState, PSW_USB_V),
                                   "PSW_V");
    cpu_PSW_SV = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUTriCoreState, PSW_USB_SV),
                                    "PSW_SV");
    cpu_PSW_AV = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUTriCoreState, PSW_USB_AV),
                                    "PSW_AV");
    cpu_PSW_SAV = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUTriCoreState, PSW_USB_SAV),
                                     "PSW_SAV");
}