2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
39 static TCGv cpu_gpr_a
[16];
40 static TCGv cpu_gpr_d
[16];
42 static TCGv cpu_PSW_C
;
43 static TCGv cpu_PSW_V
;
44 static TCGv cpu_PSW_SV
;
45 static TCGv cpu_PSW_AV
;
46 static TCGv cpu_PSW_SAV
;
48 static TCGv_ptr cpu_env
;
50 #include "exec/gen-icount.h"
/* Printable names of the 16 address registers; a10 doubles as the
 * stack pointer, hence "sp". Indexed by register number. */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };
/* Printable names of the 16 data registers, indexed by register number. */
static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
64 typedef struct DisasContext
{
65 struct TranslationBlock
*tb
;
66 target_ulong pc
, saved_pc
, next_pc
;
68 int singlestep_enabled
;
69 /* Routine used to access memory */
71 uint32_t hflags
, saved_hflags
;
83 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
84 fprintf_function cpu_fprintf
, int flags
)
86 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
87 CPUTriCoreState
*env
= &cpu
->env
;
90 cpu_fprintf(f
, "PC=%08x\n", env
->PC
);
91 for (i
= 0; i
< 16; ++i
) {
93 cpu_fprintf(f
, "GPR A%02d:", i
);
95 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_a
[i
], env
->gpr_a
[i
]);
97 for (i
= 0; i
< 16; ++i
) {
99 cpu_fprintf(f
, "GPR D%02d:", i
);
101 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_d
[i
], env
->gpr_d
[i
]);
107 * Functions to generate micro-ops
110 /* Makros for generating helpers */
/* Call helper_<name>(env, arg) with a throw-away i32 constant argument. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)

/* Effective address for the 18-bit ABS format: off18[17:14] form the
 * top address nibble, off18[13:0] the low 14 bits. */
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
/* Effective address for the 24-bit absolute branch (B) format.
 * Fixed to use the macro parameter `con` instead of capturing the
 * caller's local `offset` (all call sites pass `offset`, so behavior
 * is unchanged, but the macro is now hygienic). */
#define EA_B_ABSOLUT(con) (((con & 0xf00000) << 8) | \
                           ((con & 0x0fffff) << 1))
122 /* Functions for load/save to/from memory */
124 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
125 int16_t con
, TCGMemOp mop
)
127 TCGv temp
= tcg_temp_new();
128 tcg_gen_addi_tl(temp
, r2
, con
);
129 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
133 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
134 int16_t con
, TCGMemOp mop
)
136 TCGv temp
= tcg_temp_new();
137 tcg_gen_addi_tl(temp
, r2
, con
);
138 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
142 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
144 TCGv_i64 temp
= tcg_temp_new_i64();
146 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
147 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
149 tcg_temp_free_i64(temp
);
152 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
154 TCGv_i64 temp
= tcg_temp_new_i64();
156 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
157 /* write back to two 32 bit regs */
158 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
160 tcg_temp_free_i64(temp
);
163 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
164 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
166 TCGv temp
= tcg_temp_new();
167 TCGv temp2
= tcg_temp_new();
169 /* temp = (M(EA, word) */
170 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
171 /* temp = temp & ~E[a][63:32]) */
172 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
173 /* temp2 = (E[a][31:0] & E[a][63:32]); */
174 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
175 /* temp = temp | temp2; */
176 tcg_gen_or_tl(temp
, temp
, temp2
);
177 /* M(EA, word) = temp; */
178 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
181 tcg_temp_free(temp2
);
184 /* tmp = M(EA, word);
187 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
189 TCGv temp
= tcg_temp_new();
191 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
192 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
193 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
198 /* Functions for arithmetic instructions */
200 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
202 TCGv t0
= tcg_temp_new_i32();
203 TCGv result
= tcg_temp_new_i32();
204 /* Addition and set V/SV bits */
205 tcg_gen_add_tl(result
, r1
, r2
);
207 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
208 tcg_gen_xor_tl(t0
, r1
, r2
);
209 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
211 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
212 /* Calc AV/SAV bits */
213 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
214 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
216 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
217 /* write back result */
218 tcg_gen_mov_tl(ret
, result
);
220 tcg_temp_free(result
);
224 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
226 TCGv temp
= tcg_const_i32(r2
);
227 gen_add_d(ret
, r1
, temp
);
231 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
234 TCGv temp
= tcg_temp_new();
235 TCGv temp2
= tcg_temp_new();
236 TCGv result
= tcg_temp_new();
237 TCGv mask
= tcg_temp_new();
238 TCGv t0
= tcg_const_i32(0);
240 /* create mask for sticky bits */
241 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
242 tcg_gen_shli_tl(mask
, mask
, 31);
244 tcg_gen_add_tl(result
, r1
, r2
);
246 tcg_gen_xor_tl(temp
, result
, r1
);
247 tcg_gen_xor_tl(temp2
, r1
, r2
);
248 tcg_gen_andc_tl(temp
, temp
, temp2
);
249 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
251 tcg_gen_and_tl(temp
, temp
, mask
);
252 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
254 tcg_gen_add_tl(temp
, result
, result
);
255 tcg_gen_xor_tl(temp
, temp
, result
);
256 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
258 tcg_gen_and_tl(temp
, temp
, mask
);
259 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
260 /* write back result */
261 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r3
);
265 tcg_temp_free(temp2
);
266 tcg_temp_free(result
);
270 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
273 TCGv temp
= tcg_const_i32(r2
);
274 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
278 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
280 TCGv temp
= tcg_temp_new_i32();
281 TCGv result
= tcg_temp_new_i32();
283 tcg_gen_sub_tl(result
, r1
, r2
);
285 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
286 tcg_gen_xor_tl(temp
, r1
, r2
);
287 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
289 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
291 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
292 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
294 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
295 /* write back result */
296 tcg_gen_mov_tl(ret
, result
);
299 tcg_temp_free(result
);
302 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
304 TCGv high
= tcg_temp_new();
305 TCGv low
= tcg_temp_new();
307 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
308 tcg_gen_mov_tl(ret
, low
);
310 tcg_gen_sari_tl(low
, low
, 31);
311 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
312 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
314 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
316 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
317 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
319 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
325 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
327 TCGv sat_neg
= tcg_const_i32(low
);
328 TCGv temp
= tcg_const_i32(up
);
330 /* sat_neg = (arg < low ) ? low : arg; */
331 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
333 /* ret = (sat_neg > up ) ? up : sat_neg; */
334 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
336 tcg_temp_free(sat_neg
);
340 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
342 TCGv temp
= tcg_const_i32(up
);
343 /* sat_neg = (arg > up ) ? up : arg; */
344 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
348 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
350 if (shift_count
== -32) {
351 tcg_gen_movi_tl(ret
, 0);
352 } else if (shift_count
>= 0) {
353 tcg_gen_shli_tl(ret
, r1
, shift_count
);
355 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
359 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
361 uint32_t msk
, msk_start
;
362 TCGv temp
= tcg_temp_new();
363 TCGv temp2
= tcg_temp_new();
364 TCGv t_0
= tcg_const_i32(0);
366 if (shift_count
== 0) {
367 /* Clear PSW.C and PSW.V */
368 tcg_gen_movi_tl(cpu_PSW_C
, 0);
369 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
370 tcg_gen_mov_tl(ret
, r1
);
371 } else if (shift_count
== -32) {
373 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
374 /* fill ret completly with sign bit */
375 tcg_gen_sari_tl(ret
, r1
, 31);
377 tcg_gen_movi_tl(cpu_PSW_V
, 0);
378 } else if (shift_count
> 0) {
379 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
380 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
383 msk_start
= 32 - shift_count
;
384 msk
= ((1 << shift_count
) - 1) << msk_start
;
385 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
387 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
388 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
389 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
390 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
392 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
394 tcg_gen_shli_tl(ret
, r1
, shift_count
);
396 tcg_temp_free(t_max
);
397 tcg_temp_free(t_min
);
400 tcg_gen_movi_tl(cpu_PSW_V
, 0);
402 msk
= (1 << -shift_count
) - 1;
403 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
405 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
407 /* calc av overflow bit */
408 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
409 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
410 /* calc sav overflow bit */
411 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
414 tcg_temp_free(temp2
);
418 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
420 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
423 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
425 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
428 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
430 void(*op1
)(TCGv
, TCGv
, TCGv
),
431 void(*op2
)(TCGv
, TCGv
, TCGv
))
435 temp1
= tcg_temp_new();
436 temp2
= tcg_temp_new();
438 tcg_gen_shri_tl(temp2
, r2
, pos2
);
439 tcg_gen_shri_tl(temp1
, r1
, pos1
);
441 (*op1
)(temp1
, temp1
, temp2
);
442 (*op2
)(temp1
, ret
, temp1
);
444 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
446 tcg_temp_free(temp1
);
447 tcg_temp_free(temp2
);
450 /* ret = r1[pos1] op1 r2[pos2]; */
451 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
453 void(*op1
)(TCGv
, TCGv
, TCGv
))
457 temp1
= tcg_temp_new();
458 temp2
= tcg_temp_new();
460 tcg_gen_shri_tl(temp2
, r2
, pos2
);
461 tcg_gen_shri_tl(temp1
, r1
, pos1
);
463 (*op1
)(ret
, temp1
, temp2
);
465 tcg_gen_andi_tl(ret
, ret
, 0x1);
467 tcg_temp_free(temp1
);
468 tcg_temp_free(temp2
);
471 /* helpers for generating program flow micro-ops */
473 static inline void gen_save_pc(target_ulong pc
)
475 tcg_gen_movi_tl(cpu_PC
, pc
);
478 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
480 TranslationBlock
*tb
;
482 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
483 likely(!ctx
->singlestep_enabled
)) {
486 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
489 if (ctx
->singlestep_enabled
) {
490 /* raise exception debug */
496 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
497 TCGv r2
, int16_t address
)
500 jumpLabel
= gen_new_label();
501 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
503 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
505 gen_set_label(jumpLabel
);
506 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
509 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
510 int r2
, int16_t address
)
512 TCGv temp
= tcg_const_i32(r2
);
513 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
517 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
520 l1
= gen_new_label();
522 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
523 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
524 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
526 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
529 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
530 int r2
, int32_t constant
, int32_t offset
)
535 /* SB-format jumps */
538 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
541 case OPC1_16_SB_CALL
:
542 gen_helper_1arg(call
, ctx
->next_pc
);
543 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
546 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
549 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
551 /* SBC-format jumps */
552 case OPC1_16_SBC_JEQ
:
553 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
555 case OPC1_16_SBC_JNE
:
556 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
558 /* SBRN-format jumps */
559 case OPC1_16_SBRN_JZ_T
:
560 temp
= tcg_temp_new();
561 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
562 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
565 case OPC1_16_SBRN_JNZ_T
:
566 temp
= tcg_temp_new();
567 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
568 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
571 /* SBR-format jumps */
572 case OPC1_16_SBR_JEQ
:
573 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
576 case OPC1_16_SBR_JNE
:
577 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
580 case OPC1_16_SBR_JNZ
:
581 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
583 case OPC1_16_SBR_JNZ_A
:
584 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
586 case OPC1_16_SBR_JGEZ
:
587 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
589 case OPC1_16_SBR_JGTZ
:
590 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
592 case OPC1_16_SBR_JLEZ
:
593 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
595 case OPC1_16_SBR_JLTZ
:
596 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
599 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
601 case OPC1_16_SBR_JZ_A
:
602 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
604 case OPC1_16_SBR_LOOP
:
605 gen_loop(ctx
, r1
, offset
* 2 - 32);
607 /* SR-format jumps */
609 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
613 gen_helper_ret(cpu_env
);
617 case OPC1_32_B_CALLA
:
618 gen_helper_1arg(call
, ctx
->next_pc
);
619 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
622 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
624 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
627 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
628 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
631 printf("Branch Error at %x\n", ctx
->pc
);
633 ctx
->bstate
= BS_BRANCH
;
638 * Functions for decoding instructions
641 static void decode_src_opc(DisasContext
*ctx
, int op1
)
647 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
648 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
651 case OPC1_16_SRC_ADD
:
652 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
654 case OPC1_16_SRC_ADD_A15
:
655 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
657 case OPC1_16_SRC_ADD_15A
:
658 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
660 case OPC1_16_SRC_ADD_A
:
661 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
663 case OPC1_16_SRC_CADD
:
664 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
667 case OPC1_16_SRC_CADDN
:
668 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
671 case OPC1_16_SRC_CMOV
:
672 temp
= tcg_const_tl(0);
673 temp2
= tcg_const_tl(const4
);
674 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
675 temp2
, cpu_gpr_d
[r1
]);
677 tcg_temp_free(temp2
);
679 case OPC1_16_SRC_CMOVN
:
680 temp
= tcg_const_tl(0);
681 temp2
= tcg_const_tl(const4
);
682 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
683 temp2
, cpu_gpr_d
[r1
]);
685 tcg_temp_free(temp2
);
688 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
692 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
695 case OPC1_16_SRC_MOV
:
696 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
698 case OPC1_16_SRC_MOV_A
:
699 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
700 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
703 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
705 case OPC1_16_SRC_SHA
:
706 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
711 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
716 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
717 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
720 case OPC1_16_SRR_ADD
:
721 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
723 case OPC1_16_SRR_ADD_A15
:
724 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
726 case OPC1_16_SRR_ADD_15A
:
727 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
729 case OPC1_16_SRR_ADD_A
:
730 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
732 case OPC1_16_SRR_ADDS
:
733 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
735 case OPC1_16_SRR_AND
:
736 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
738 case OPC1_16_SRR_CMOV
:
739 temp
= tcg_const_tl(0);
740 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
741 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
744 case OPC1_16_SRR_CMOVN
:
745 temp
= tcg_const_tl(0);
746 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
747 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
751 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
755 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
758 case OPC1_16_SRR_MOV
:
759 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
761 case OPC1_16_SRR_MOV_A
:
762 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
764 case OPC1_16_SRR_MOV_AA
:
765 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
767 case OPC1_16_SRR_MOV_D
:
768 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
770 case OPC1_16_SRR_MUL
:
771 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
774 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
776 case OPC1_16_SRR_SUB
:
777 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
779 case OPC1_16_SRR_SUB_A15B
:
780 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
782 case OPC1_16_SRR_SUB_15AB
:
783 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
785 case OPC1_16_SRR_SUBS
:
786 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
788 case OPC1_16_SRR_XOR
:
789 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
794 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
798 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
799 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
802 case OPC1_16_SSR_ST_A
:
803 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
805 case OPC1_16_SSR_ST_A_POSTINC
:
806 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
807 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
809 case OPC1_16_SSR_ST_B
:
810 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
812 case OPC1_16_SSR_ST_B_POSTINC
:
813 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
814 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
816 case OPC1_16_SSR_ST_H
:
817 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
819 case OPC1_16_SSR_ST_H_POSTINC
:
820 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
821 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
823 case OPC1_16_SSR_ST_W
:
824 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
826 case OPC1_16_SSR_ST_W_POSTINC
:
827 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
828 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
833 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
837 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
841 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
843 case OPC1_16_SC_BISR
:
844 gen_helper_1arg(bisr
, const16
& 0xff);
846 case OPC1_16_SC_LD_A
:
847 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
849 case OPC1_16_SC_LD_W
:
850 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
853 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
856 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
858 case OPC1_16_SC_ST_A
:
859 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
861 case OPC1_16_SC_ST_W
:
862 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
864 case OPC1_16_SC_SUB_A
:
865 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
870 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
874 r1
= MASK_OP_SLR_D(ctx
->opcode
);
875 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
879 case OPC1_16_SLR_LD_A
:
880 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
882 case OPC1_16_SLR_LD_A_POSTINC
:
883 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
884 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
886 case OPC1_16_SLR_LD_BU
:
887 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
889 case OPC1_16_SLR_LD_BU_POSTINC
:
890 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
891 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
893 case OPC1_16_SLR_LD_H
:
894 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
896 case OPC1_16_SLR_LD_H_POSTINC
:
897 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
898 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
900 case OPC1_16_SLR_LD_W
:
901 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
903 case OPC1_16_SLR_LD_W_POSTINC
:
904 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
905 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
910 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
915 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
916 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
920 case OPC1_16_SRO_LD_A
:
921 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
923 case OPC1_16_SRO_LD_BU
:
924 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
926 case OPC1_16_SRO_LD_H
:
927 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
929 case OPC1_16_SRO_LD_W
:
930 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
932 case OPC1_16_SRO_ST_A
:
933 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
935 case OPC1_16_SRO_ST_B
:
936 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
938 case OPC1_16_SRO_ST_H
:
939 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
941 case OPC1_16_SRO_ST_W
:
942 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
947 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
950 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
956 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
959 gen_helper_rfe(cpu_env
);
961 ctx
->bstate
= BS_BRANCH
;
963 case OPC2_16_SR_DEBUG
:
964 /* raise EXCP_DEBUG */
969 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
975 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
976 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
979 case OPC2_16_SR_RSUB
:
980 /* overflow only if r1 = -0x80000000 */
981 temp
= tcg_const_i32(-0x80000000);
983 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
984 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
986 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
988 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
990 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
991 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
993 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
996 case OPC2_16_SR_SAT_B
:
997 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
999 case OPC2_16_SR_SAT_BU
:
1000 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
1002 case OPC2_16_SR_SAT_H
:
1003 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
1005 case OPC2_16_SR_SAT_HU
:
1006 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
1011 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
1019 op1
= MASK_OP_MAJOR(ctx
->opcode
);
1021 /* handle ADDSC.A opcode only being 6 bit long */
1022 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
1023 op1
= OPC1_16_SRRS_ADDSC_A
;
1027 case OPC1_16_SRC_ADD
:
1028 case OPC1_16_SRC_ADD_A15
:
1029 case OPC1_16_SRC_ADD_15A
:
1030 case OPC1_16_SRC_ADD_A
:
1031 case OPC1_16_SRC_CADD
:
1032 case OPC1_16_SRC_CADDN
:
1033 case OPC1_16_SRC_CMOV
:
1034 case OPC1_16_SRC_CMOVN
:
1035 case OPC1_16_SRC_EQ
:
1036 case OPC1_16_SRC_LT
:
1037 case OPC1_16_SRC_MOV
:
1038 case OPC1_16_SRC_MOV_A
:
1039 case OPC1_16_SRC_SH
:
1040 case OPC1_16_SRC_SHA
:
1041 decode_src_opc(ctx
, op1
);
1044 case OPC1_16_SRR_ADD
:
1045 case OPC1_16_SRR_ADD_A15
:
1046 case OPC1_16_SRR_ADD_15A
:
1047 case OPC1_16_SRR_ADD_A
:
1048 case OPC1_16_SRR_ADDS
:
1049 case OPC1_16_SRR_AND
:
1050 case OPC1_16_SRR_CMOV
:
1051 case OPC1_16_SRR_CMOVN
:
1052 case OPC1_16_SRR_EQ
:
1053 case OPC1_16_SRR_LT
:
1054 case OPC1_16_SRR_MOV
:
1055 case OPC1_16_SRR_MOV_A
:
1056 case OPC1_16_SRR_MOV_AA
:
1057 case OPC1_16_SRR_MOV_D
:
1058 case OPC1_16_SRR_MUL
:
1059 case OPC1_16_SRR_OR
:
1060 case OPC1_16_SRR_SUB
:
1061 case OPC1_16_SRR_SUB_A15B
:
1062 case OPC1_16_SRR_SUB_15AB
:
1063 case OPC1_16_SRR_SUBS
:
1064 case OPC1_16_SRR_XOR
:
1065 decode_srr_opc(ctx
, op1
);
1068 case OPC1_16_SSR_ST_A
:
1069 case OPC1_16_SSR_ST_A_POSTINC
:
1070 case OPC1_16_SSR_ST_B
:
1071 case OPC1_16_SSR_ST_B_POSTINC
:
1072 case OPC1_16_SSR_ST_H
:
1073 case OPC1_16_SSR_ST_H_POSTINC
:
1074 case OPC1_16_SSR_ST_W
:
1075 case OPC1_16_SSR_ST_W_POSTINC
:
1076 decode_ssr_opc(ctx
, op1
);
1079 case OPC1_16_SRRS_ADDSC_A
:
1080 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
1081 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
1082 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
1083 temp
= tcg_temp_new();
1084 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
1085 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
1086 tcg_temp_free(temp
);
1089 case OPC1_16_SLRO_LD_A
:
1090 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
1091 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
1092 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
1094 case OPC1_16_SLRO_LD_BU
:
1095 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
1096 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
1097 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
1099 case OPC1_16_SLRO_LD_H
:
1100 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
1101 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
1102 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
1104 case OPC1_16_SLRO_LD_W
:
1105 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
1106 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
1107 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
1110 case OPC1_16_SB_CALL
:
1112 case OPC1_16_SB_JNZ
:
1114 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
1115 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
1118 case OPC1_16_SBC_JEQ
:
1119 case OPC1_16_SBC_JNE
:
1120 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
1121 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
1122 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
1125 case OPC1_16_SBRN_JNZ_T
:
1126 case OPC1_16_SBRN_JZ_T
:
1127 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
1128 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
1129 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
1132 case OPC1_16_SBR_JEQ
:
1133 case OPC1_16_SBR_JGEZ
:
1134 case OPC1_16_SBR_JGTZ
:
1135 case OPC1_16_SBR_JLEZ
:
1136 case OPC1_16_SBR_JLTZ
:
1137 case OPC1_16_SBR_JNE
:
1138 case OPC1_16_SBR_JNZ
:
1139 case OPC1_16_SBR_JNZ_A
:
1140 case OPC1_16_SBR_JZ
:
1141 case OPC1_16_SBR_JZ_A
:
1142 case OPC1_16_SBR_LOOP
:
1143 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
1144 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
1145 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
1148 case OPC1_16_SC_AND
:
1149 case OPC1_16_SC_BISR
:
1150 case OPC1_16_SC_LD_A
:
1151 case OPC1_16_SC_LD_W
:
1152 case OPC1_16_SC_MOV
:
1154 case OPC1_16_SC_ST_A
:
1155 case OPC1_16_SC_ST_W
:
1156 case OPC1_16_SC_SUB_A
:
1157 decode_sc_opc(ctx
, op1
);
1160 case OPC1_16_SLR_LD_A
:
1161 case OPC1_16_SLR_LD_A_POSTINC
:
1162 case OPC1_16_SLR_LD_BU
:
1163 case OPC1_16_SLR_LD_BU_POSTINC
:
1164 case OPC1_16_SLR_LD_H
:
1165 case OPC1_16_SLR_LD_H_POSTINC
:
1166 case OPC1_16_SLR_LD_W
:
1167 case OPC1_16_SLR_LD_W_POSTINC
:
1168 decode_slr_opc(ctx
, op1
);
1171 case OPC1_16_SRO_LD_A
:
1172 case OPC1_16_SRO_LD_BU
:
1173 case OPC1_16_SRO_LD_H
:
1174 case OPC1_16_SRO_LD_W
:
1175 case OPC1_16_SRO_ST_A
:
1176 case OPC1_16_SRO_ST_B
:
1177 case OPC1_16_SRO_ST_H
:
1178 case OPC1_16_SRO_ST_W
:
1179 decode_sro_opc(ctx
, op1
);
1182 case OPC1_16_SSRO_ST_A
:
1183 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1184 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1185 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
1187 case OPC1_16_SSRO_ST_B
:
1188 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1189 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1190 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
1192 case OPC1_16_SSRO_ST_H
:
1193 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1194 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1195 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
1197 case OPC1_16_SSRO_ST_W
:
1198 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
1199 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
1200 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
1203 case OPCM_16_SR_SYSTEM
:
1204 decode_sr_system(env
, ctx
);
1206 case OPCM_16_SR_ACCU
:
1207 decode_sr_accu(env
, ctx
);
1210 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
1211 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
1213 case OPC1_16_SR_NOT
:
1214 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
1215 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
1221 * 32 bit instructions
1225 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
1232 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1233 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1234 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1236 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1239 case OPC2_32_ABS_LD_A
:
1240 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
1242 case OPC2_32_ABS_LD_D
:
1243 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
1245 case OPC2_32_ABS_LD_DA
:
1246 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
1248 case OPC2_32_ABS_LD_W
:
1249 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
1253 tcg_temp_free(temp
);
1256 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
1263 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1264 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1265 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1267 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1270 case OPC2_32_ABS_LD_B
:
1271 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
1273 case OPC2_32_ABS_LD_BU
:
1274 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
1276 case OPC2_32_ABS_LD_H
:
1277 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
1279 case OPC2_32_ABS_LD_HU
:
1280 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
1284 tcg_temp_free(temp
);
1287 static void decode_abs_ldst_swap(CPUTriCoreState
*env
, DisasContext
*ctx
)
1294 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1295 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1296 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1298 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1301 case OPC2_32_ABS_LDMST
:
1302 gen_ldmst(ctx
, r1
, temp
);
1304 case OPC2_32_ABS_SWAP_W
:
1305 gen_swap(ctx
, r1
, temp
);
1309 tcg_temp_free(temp
);
1312 static void decode_abs_ldst_context(CPUTriCoreState
*env
, DisasContext
*ctx
)
1317 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1318 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1321 case OPC2_32_ABS_LDLCX
:
1322 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
1324 case OPC2_32_ABS_LDUCX
:
1325 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
1327 case OPC2_32_ABS_STLCX
:
1328 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
1330 case OPC2_32_ABS_STUCX
:
1331 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
1336 static void decode_abs_store(CPUTriCoreState
*env
, DisasContext
*ctx
)
1343 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1344 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1345 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1347 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1350 case OPC2_32_ABS_ST_A
:
1351 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
1353 case OPC2_32_ABS_ST_D
:
1354 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
1356 case OPC2_32_ABS_ST_DA
:
1357 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
1359 case OPC2_32_ABS_ST_W
:
1360 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
1364 tcg_temp_free(temp
);
1367 static void decode_abs_storeb_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
1374 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1375 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1376 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
1378 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1381 case OPC2_32_ABS_ST_B
:
1382 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
1384 case OPC2_32_ABS_ST_H
:
1385 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
1388 tcg_temp_free(temp
);
1393 static void decode_bit_andacc(CPUTriCoreState
*env
, DisasContext
*ctx
)
1399 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1400 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1401 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1402 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1403 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1404 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1408 case OPC2_32_BIT_AND_AND_T
:
1409 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1410 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
1412 case OPC2_32_BIT_AND_ANDN_T
:
1413 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1414 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
1416 case OPC2_32_BIT_AND_NOR_T
:
1417 if (TCG_TARGET_HAS_andc_i32
) {
1418 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1419 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
1421 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1422 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
1425 case OPC2_32_BIT_AND_OR_T
:
1426 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1427 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
1432 static void decode_bit_logical_t(CPUTriCoreState
*env
, DisasContext
*ctx
)
1437 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1438 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1439 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1440 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1441 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1442 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1445 case OPC2_32_BIT_AND_T
:
1446 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1447 pos1
, pos2
, &tcg_gen_and_tl
);
1449 case OPC2_32_BIT_ANDN_T
:
1450 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1451 pos1
, pos2
, &tcg_gen_andc_tl
);
1453 case OPC2_32_BIT_NOR_T
:
1454 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1455 pos1
, pos2
, &tcg_gen_nor_tl
);
1457 case OPC2_32_BIT_OR_T
:
1458 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1459 pos1
, pos2
, &tcg_gen_or_tl
);
1464 static void decode_bit_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
1470 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1471 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1472 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1473 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1474 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1475 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1477 temp
= tcg_temp_new();
1479 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
1480 if (op2
== OPC2_32_BIT_INSN_T
) {
1481 tcg_gen_not_tl(temp
, temp
);
1483 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
1484 tcg_temp_free(temp
);
1487 static void decode_bit_logical_t2(CPUTriCoreState
*env
, DisasContext
*ctx
)
1494 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1495 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1496 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1497 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1498 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1499 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1502 case OPC2_32_BIT_NAND_T
:
1503 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1504 pos1
, pos2
, &tcg_gen_nand_tl
);
1506 case OPC2_32_BIT_ORN_T
:
1507 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1508 pos1
, pos2
, &tcg_gen_orc_tl
);
1510 case OPC2_32_BIT_XNOR_T
:
1511 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1512 pos1
, pos2
, &tcg_gen_eqv_tl
);
1514 case OPC2_32_BIT_XOR_T
:
1515 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1516 pos1
, pos2
, &tcg_gen_xor_tl
);
1521 static void decode_bit_orand(CPUTriCoreState
*env
, DisasContext
*ctx
)
1528 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1529 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1530 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1531 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1532 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1533 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1536 case OPC2_32_BIT_OR_AND_T
:
1537 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1538 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
1540 case OPC2_32_BIT_OR_ANDN_T
:
1541 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1542 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
1544 case OPC2_32_BIT_OR_NOR_T
:
1545 if (TCG_TARGET_HAS_orc_i32
) {
1546 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1547 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
1549 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1550 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
1553 case OPC2_32_BIT_OR_OR_T
:
1554 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1555 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
1560 static void decode_bit_sh_logic1(CPUTriCoreState
*env
, DisasContext
*ctx
)
1567 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1568 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1569 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1570 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1571 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1572 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1574 temp
= tcg_temp_new();
1577 case OPC2_32_BIT_SH_AND_T
:
1578 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1579 pos1
, pos2
, &tcg_gen_and_tl
);
1581 case OPC2_32_BIT_SH_ANDN_T
:
1582 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1583 pos1
, pos2
, &tcg_gen_andc_tl
);
1585 case OPC2_32_BIT_SH_NOR_T
:
1586 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1587 pos1
, pos2
, &tcg_gen_nor_tl
);
1589 case OPC2_32_BIT_SH_OR_T
:
1590 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1591 pos1
, pos2
, &tcg_gen_or_tl
);
1594 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
1595 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
1596 tcg_temp_free(temp
);
1599 static void decode_bit_sh_logic2(CPUTriCoreState
*env
, DisasContext
*ctx
)
1606 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
1607 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
1608 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
1609 r3
= MASK_OP_BIT_D(ctx
->opcode
);
1610 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
1611 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
1613 temp
= tcg_temp_new();
1616 case OPC2_32_BIT_SH_NAND_T
:
1617 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
1618 pos1
, pos2
, &tcg_gen_nand_tl
);
1620 case OPC2_32_BIT_SH_ORN_T
:
1621 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1622 pos1
, pos2
, &tcg_gen_orc_tl
);
1624 case OPC2_32_BIT_SH_XNOR_T
:
1625 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1626 pos1
, pos2
, &tcg_gen_eqv_tl
);
1628 case OPC2_32_BIT_SH_XOR_T
:
1629 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1630 pos1
, pos2
, &tcg_gen_xor_tl
);
1633 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
1634 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
1635 tcg_temp_free(temp
);
1638 static void decode_32Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
1647 op1
= MASK_OP_MAJOR(ctx
->opcode
);
1651 case OPCM_32_ABS_LDW
:
1652 decode_abs_ldw(env
, ctx
);
1654 case OPCM_32_ABS_LDB
:
1655 decode_abs_ldb(env
, ctx
);
1657 case OPCM_32_ABS_LDMST_SWAP
:
1658 decode_abs_ldst_swap(env
, ctx
);
1660 case OPCM_32_ABS_LDST_CONTEXT
:
1661 decode_abs_ldst_context(env
, ctx
);
1663 case OPCM_32_ABS_STORE
:
1664 decode_abs_store(env
, ctx
);
1666 case OPCM_32_ABS_STOREB_H
:
1667 decode_abs_storeb_h(env
, ctx
);
1669 case OPC1_32_ABS_STOREQ
:
1670 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1671 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1672 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1673 temp2
= tcg_temp_new();
1675 tcg_gen_shri_tl(temp2
, cpu_gpr_d
[r1
], 16);
1676 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_LEUW
);
1678 tcg_temp_free(temp2
);
1679 tcg_temp_free(temp
);
1681 case OPC1_32_ABS_LD_Q
:
1682 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1683 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1684 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1686 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
1687 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
1689 tcg_temp_free(temp
);
1691 case OPC1_32_ABS_LEA
:
1692 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1693 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
1694 tcg_gen_movi_tl(cpu_gpr_a
[r1
], EA_ABS_FORMAT(address
));
1697 case OPC1_32_ABSB_ST_T
:
1698 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
1699 b
= MASK_OP_ABSB_B(ctx
->opcode
);
1700 bpos
= MASK_OP_ABSB_BPOS(ctx
->opcode
);
1702 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
1703 temp2
= tcg_temp_new();
1705 tcg_gen_qemu_ld_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
1706 tcg_gen_andi_tl(temp2
, temp2
, ~(0x1u
<< bpos
));
1707 tcg_gen_ori_tl(temp2
, temp2
, (b
<< bpos
));
1708 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
1710 tcg_temp_free(temp
);
1711 tcg_temp_free(temp2
);
1714 case OPC1_32_B_CALL
:
1715 case OPC1_32_B_CALLA
:
1720 address
= MASK_OP_B_DISP24(ctx
->opcode
);
1721 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
1724 case OPCM_32_BIT_ANDACC
:
1725 decode_bit_andacc(env
, ctx
);
1727 case OPCM_32_BIT_LOGICAL_T1
:
1728 decode_bit_logical_t(env
, ctx
);
1730 case OPCM_32_BIT_INSERT
:
1731 decode_bit_insert(env
, ctx
);
1733 case OPCM_32_BIT_LOGICAL_T2
:
1734 decode_bit_logical_t2(env
, ctx
);
1736 case OPCM_32_BIT_ORAND
:
1737 decode_bit_orand(env
, ctx
);
1739 case OPCM_32_BIT_SH_LOGIC1
:
1740 decode_bit_sh_logic1(env
, ctx
);
1742 case OPCM_32_BIT_SH_LOGIC2
:
1743 decode_bit_sh_logic2(env
, ctx
);
1748 static void decode_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int *is_branch
)
1750 /* 16-Bit Instruction */
1751 if ((ctx
->opcode
& 0x1) == 0) {
1752 ctx
->next_pc
= ctx
->pc
+ 2;
1753 decode_16Bit_opc(env
, ctx
);
1754 /* 32-Bit Instruction */
1756 ctx
->next_pc
= ctx
->pc
+ 4;
1757 decode_32Bit_opc(env
, ctx
);
1762 gen_intermediate_code_internal(TriCoreCPU
*cpu
, struct TranslationBlock
*tb
,
1765 CPUState
*cs
= CPU(cpu
);
1766 CPUTriCoreState
*env
= &cpu
->env
;
1768 target_ulong pc_start
;
1770 uint16_t *gen_opc_end
;
1773 qemu_log("search pc %d\n", search_pc
);
1778 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
1782 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
1783 ctx
.bstate
= BS_NONE
;
1784 ctx
.mem_idx
= cpu_mmu_index(env
);
1786 tcg_clear_temp_count();
1788 while (ctx
.bstate
== BS_NONE
) {
1789 ctx
.opcode
= cpu_ldl_code(env
, ctx
.pc
);
1790 decode_opc(env
, &ctx
, 0);
1794 if (tcg_ctx
.gen_opc_ptr
>= gen_opc_end
) {
1795 gen_save_pc(ctx
.next_pc
);
1800 gen_save_pc(ctx
.next_pc
);
1804 ctx
.pc
= ctx
.next_pc
;
1807 gen_tb_end(tb
, num_insns
);
1808 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
1810 printf("done_generating search pc\n");
1812 tb
->size
= ctx
.pc
- pc_start
;
1813 tb
->icount
= num_insns
;
1815 if (tcg_check_temp_count()) {
1816 printf("LEAK at %08x\n", env
->PC
);
1820 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1821 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
1822 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
1829 gen_intermediate_code(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
1831 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, false);
1835 gen_intermediate_code_pc(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
1837 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, true);
1841 restore_state_to_opc(CPUTriCoreState
*env
, TranslationBlock
*tb
, int pc_pos
)
1843 env
->PC
= tcg_ctx
.gen_opc_pc
[pc_pos
];
1851 void cpu_state_reset(CPUTriCoreState
*env
)
1853 /* Reset Regs to Default Value */
1857 static void tricore_tcg_init_csfr(void)
1859 cpu_PCXI
= tcg_global_mem_new(TCG_AREG0
,
1860 offsetof(CPUTriCoreState
, PCXI
), "PCXI");
1861 cpu_PSW
= tcg_global_mem_new(TCG_AREG0
,
1862 offsetof(CPUTriCoreState
, PSW
), "PSW");
1863 cpu_PC
= tcg_global_mem_new(TCG_AREG0
,
1864 offsetof(CPUTriCoreState
, PC
), "PC");
1865 cpu_ICR
= tcg_global_mem_new(TCG_AREG0
,
1866 offsetof(CPUTriCoreState
, ICR
), "ICR");
1869 void tricore_tcg_init(void)
1876 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
1878 for (i
= 0 ; i
< 16 ; i
++) {
1879 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
1880 offsetof(CPUTriCoreState
, gpr_a
[i
]),
1883 for (i
= 0 ; i
< 16 ; i
++) {
1884 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
1885 offsetof(CPUTriCoreState
, gpr_d
[i
]),
1888 tricore_tcg_init_csfr();
1889 /* init PSW flag cache */
1890 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
1891 offsetof(CPUTriCoreState
, PSW_USB_C
),
1893 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
1894 offsetof(CPUTriCoreState
, PSW_USB_V
),
1896 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
1897 offsetof(CPUTriCoreState
, PSW_USB_SV
),
1899 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
1900 offsetof(CPUTriCoreState
, PSW_USB_AV
),
1902 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
1903 offsetof(CPUTriCoreState
, PSW_USB_SAV
),