2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
39 static TCGv cpu_gpr_a
[16];
40 static TCGv cpu_gpr_d
[16];
42 static TCGv cpu_PSW_C
;
43 static TCGv cpu_PSW_V
;
44 static TCGv cpu_PSW_SV
;
45 static TCGv cpu_PSW_AV
;
46 static TCGv cpu_PSW_SAV
;
48 static TCGv_ptr cpu_env
;
50 #include "exec/gen-icount.h"
52 static const char *regnames_a
[] = {
53 "a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
54 "a6" , "a7" , "a8" , "a9" , "sp" , "a11" ,
55 "a12" , "a13" , "a14" , "a15",
58 static const char *regnames_d
[] = {
59 "d0" , "d1" , "d2" , "d3" , "d4" , "d5" ,
60 "d6" , "d7" , "d8" , "d9" , "d10" , "d11" ,
61 "d12" , "d13" , "d14" , "d15",
64 typedef struct DisasContext
{
65 struct TranslationBlock
*tb
;
66 target_ulong pc
, saved_pc
, next_pc
;
68 int singlestep_enabled
;
69 /* Routine used to access memory */
71 uint32_t hflags
, saved_hflags
;
83 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
84 fprintf_function cpu_fprintf
, int flags
)
86 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
87 CPUTriCoreState
*env
= &cpu
->env
;
90 cpu_fprintf(f
, "PC=%08x\n", env
->PC
);
91 for (i
= 0; i
< 16; ++i
) {
93 cpu_fprintf(f
, "GPR A%02d:", i
);
95 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_a
[i
], env
->gpr_a
[i
]);
97 for (i
= 0; i
< 16; ++i
) {
99 cpu_fprintf(f
, "GPR D%02d:", i
);
101 cpu_fprintf(f
, " %s " TARGET_FMT_lx
, regnames_d
[i
], env
->gpr_d
[i
]);
107 * Functions to generate micro-ops
110 /* Makros for generating helpers */
112 #define gen_helper_1arg(name, arg) do { \
113 TCGv_i32 helper_tmp = tcg_const_i32(arg); \
114 gen_helper_##name(cpu_env, helper_tmp); \
115 tcg_temp_free_i32(helper_tmp); \
118 #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
119 #define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
120 ((offset & 0x0fffff) << 1))
122 /* Functions for load/save to/from memory */
124 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
125 int16_t con
, TCGMemOp mop
)
127 TCGv temp
= tcg_temp_new();
128 tcg_gen_addi_tl(temp
, r2
, con
);
129 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
133 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
134 int16_t con
, TCGMemOp mop
)
136 TCGv temp
= tcg_temp_new();
137 tcg_gen_addi_tl(temp
, r2
, con
);
138 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
142 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
144 TCGv_i64 temp
= tcg_temp_new_i64();
146 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
147 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
149 tcg_temp_free_i64(temp
);
152 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
155 TCGv temp
= tcg_temp_new();
156 tcg_gen_addi_tl(temp
, base
, con
);
157 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
161 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
163 TCGv_i64 temp
= tcg_temp_new_i64();
165 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
166 /* write back to two 32 bit regs */
167 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
169 tcg_temp_free_i64(temp
);
172 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
175 TCGv temp
= tcg_temp_new();
176 tcg_gen_addi_tl(temp
, base
, con
);
177 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
181 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
184 TCGv temp
= tcg_temp_new();
185 tcg_gen_addi_tl(temp
, r2
, off
);
186 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
187 tcg_gen_mov_tl(r2
, temp
);
191 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
194 TCGv temp
= tcg_temp_new();
195 tcg_gen_addi_tl(temp
, r2
, off
);
196 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
197 tcg_gen_mov_tl(r2
, temp
);
201 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
202 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
204 TCGv temp
= tcg_temp_new();
205 TCGv temp2
= tcg_temp_new();
207 /* temp = (M(EA, word) */
208 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
209 /* temp = temp & ~E[a][63:32]) */
210 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
211 /* temp2 = (E[a][31:0] & E[a][63:32]); */
212 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
213 /* temp = temp | temp2; */
214 tcg_gen_or_tl(temp
, temp
, temp2
);
215 /* M(EA, word) = temp; */
216 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
219 tcg_temp_free(temp2
);
222 /* tmp = M(EA, word);
225 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
227 TCGv temp
= tcg_temp_new();
229 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
230 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
231 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
236 /* We generate loads and store to core special function register (csfr) through
237 the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
238 makros R, A and E, which allow read-only, all and endinit protected access.
239 These makros also specify in which ISA version the csfr was introduced. */
240 #define R(ADDRESS, REG, FEATURE) \
242 if (tricore_feature(env, FEATURE)) { \
243 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
246 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
247 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
248 static inline void gen_mfcr(CPUTriCoreState
*env
, TCGv ret
, int32_t offset
)
250 /* since we're caching PSW make this a special case */
251 if (offset
== 0xfe04) {
252 gen_helper_psw_read(ret
, cpu_env
);
263 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
264 since no execption occurs */
265 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
267 if (tricore_feature(env, FEATURE)) { \
268 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
271 /* Endinit protected registers
272 TODO: Since the endinit bit is in a register of a not yet implemented
273 watchdog device, we handle endinit protected registers like
274 all-access registers for now. */
275 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
276 static inline void gen_mtcr(CPUTriCoreState
*env
, DisasContext
*ctx
, TCGv r1
,
279 if (ctx
->hflags
& TRICORE_HFLAG_SM
) {
280 /* since we're caching PSW make this a special case */
281 if (offset
== 0xfe04) {
282 gen_helper_psw_write(cpu_env
, r1
);
289 /* generate privilege trap */
293 /* Functions for arithmetic instructions */
295 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
297 TCGv t0
= tcg_temp_new_i32();
298 TCGv result
= tcg_temp_new_i32();
299 /* Addition and set V/SV bits */
300 tcg_gen_add_tl(result
, r1
, r2
);
302 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
303 tcg_gen_xor_tl(t0
, r1
, r2
);
304 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
306 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
307 /* Calc AV/SAV bits */
308 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
309 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
311 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
312 /* write back result */
313 tcg_gen_mov_tl(ret
, result
);
315 tcg_temp_free(result
);
319 /* ret = r2 + (r1 * r3); */
320 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
322 TCGv_i64 t1
= tcg_temp_new_i64();
323 TCGv_i64 t2
= tcg_temp_new_i64();
324 TCGv_i64 t3
= tcg_temp_new_i64();
326 tcg_gen_ext_i32_i64(t1
, r1
);
327 tcg_gen_ext_i32_i64(t2
, r2
);
328 tcg_gen_ext_i32_i64(t3
, r3
);
330 tcg_gen_mul_i64(t1
, t1
, t3
);
331 tcg_gen_add_i64(t1
, t2
, t1
);
333 tcg_gen_trunc_i64_i32(ret
, t1
);
336 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
337 /* t1 < -0x80000000 */
338 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
339 tcg_gen_or_i64(t2
, t2
, t3
);
340 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
341 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
343 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
344 /* Calc AV/SAV bits */
345 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
346 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
348 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
350 tcg_temp_free_i64(t1
);
351 tcg_temp_free_i64(t2
);
352 tcg_temp_free_i64(t3
);
355 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
357 TCGv temp
= tcg_const_i32(con
);
358 gen_madd32_d(ret
, r1
, r2
, temp
);
363 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
366 TCGv t1
= tcg_temp_new();
367 TCGv t2
= tcg_temp_new();
368 TCGv t3
= tcg_temp_new();
369 TCGv t4
= tcg_temp_new();
371 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
372 /* only the add can overflow */
373 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
375 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
376 tcg_gen_xor_tl(t1
, r2_high
, t2
);
377 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
379 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
380 /* Calc AV/SAV bits */
381 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
382 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
384 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
385 /* write back the result */
386 tcg_gen_mov_tl(ret_low
, t3
);
387 tcg_gen_mov_tl(ret_high
, t4
);
396 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
399 TCGv_i64 t1
= tcg_temp_new_i64();
400 TCGv_i64 t2
= tcg_temp_new_i64();
401 TCGv_i64 t3
= tcg_temp_new_i64();
403 tcg_gen_extu_i32_i64(t1
, r1
);
404 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
405 tcg_gen_extu_i32_i64(t3
, r3
);
407 tcg_gen_mul_i64(t1
, t1
, t3
);
408 tcg_gen_add_i64(t2
, t2
, t1
);
409 /* write back result */
410 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
411 /* only the add overflows, if t2 < t1
413 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
414 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
415 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
417 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
418 /* Calc AV/SAV bits */
419 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
420 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
422 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
424 tcg_temp_free_i64(t1
);
425 tcg_temp_free_i64(t2
);
426 tcg_temp_free_i64(t3
);
430 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
433 TCGv temp
= tcg_const_i32(con
);
434 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
439 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
442 TCGv temp
= tcg_const_i32(con
);
443 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
447 /* ret = r2 - (r1 * r3); */
448 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
450 TCGv_i64 t1
= tcg_temp_new_i64();
451 TCGv_i64 t2
= tcg_temp_new_i64();
452 TCGv_i64 t3
= tcg_temp_new_i64();
454 tcg_gen_ext_i32_i64(t1
, r1
);
455 tcg_gen_ext_i32_i64(t2
, r2
);
456 tcg_gen_ext_i32_i64(t3
, r3
);
458 tcg_gen_mul_i64(t1
, t1
, t3
);
459 tcg_gen_sub_i64(t1
, t2
, t1
);
461 tcg_gen_trunc_i64_i32(ret
, t1
);
464 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
465 /* result < -0x80000000 */
466 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
467 tcg_gen_or_i64(t2
, t2
, t3
);
468 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
469 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
472 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
473 /* Calc AV/SAV bits */
474 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
475 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
477 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
479 tcg_temp_free_i64(t1
);
480 tcg_temp_free_i64(t2
);
481 tcg_temp_free_i64(t3
);
484 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
486 TCGv temp
= tcg_const_i32(con
);
487 gen_msub32_d(ret
, r1
, r2
, temp
);
492 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
495 TCGv t1
= tcg_temp_new();
496 TCGv t2
= tcg_temp_new();
497 TCGv t3
= tcg_temp_new();
498 TCGv t4
= tcg_temp_new();
500 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
501 /* only the sub can overflow */
502 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
504 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
505 tcg_gen_xor_tl(t1
, r2_high
, t2
);
506 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
508 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
509 /* Calc AV/SAV bits */
510 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
511 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
513 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
514 /* write back the result */
515 tcg_gen_mov_tl(ret_low
, t3
);
516 tcg_gen_mov_tl(ret_high
, t4
);
525 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
528 TCGv temp
= tcg_const_i32(con
);
529 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
534 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
537 TCGv_i64 t1
= tcg_temp_new_i64();
538 TCGv_i64 t2
= tcg_temp_new_i64();
539 TCGv_i64 t3
= tcg_temp_new_i64();
541 tcg_gen_extu_i32_i64(t1
, r1
);
542 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
543 tcg_gen_extu_i32_i64(t3
, r3
);
545 tcg_gen_mul_i64(t1
, t1
, t3
);
546 tcg_gen_sub_i64(t3
, t2
, t1
);
547 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
548 /* calc V bit, only the sub can overflow, if t1 > t2 */
549 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
550 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
551 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
553 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
554 /* Calc AV/SAV bits */
555 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
556 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
558 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
560 tcg_temp_free_i64(t1
);
561 tcg_temp_free_i64(t2
);
562 tcg_temp_free_i64(t3
);
566 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
569 TCGv temp
= tcg_const_i32(con
);
570 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
574 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
576 TCGv temp
= tcg_const_i32(r2
);
577 gen_add_d(ret
, r1
, temp
);
580 /* calculate the carry bit too */
581 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
583 TCGv t0
= tcg_temp_new_i32();
584 TCGv result
= tcg_temp_new_i32();
586 tcg_gen_movi_tl(t0
, 0);
587 /* Addition and set C/V/SV bits */
588 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
590 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
591 tcg_gen_xor_tl(t0
, r1
, r2
);
592 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
594 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
595 /* Calc AV/SAV bits */
596 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
597 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
599 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
600 /* write back result */
601 tcg_gen_mov_tl(ret
, result
);
603 tcg_temp_free(result
);
607 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
609 TCGv temp
= tcg_const_i32(con
);
610 gen_add_CC(ret
, r1
, temp
);
614 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
616 TCGv carry
= tcg_temp_new_i32();
617 TCGv t0
= tcg_temp_new_i32();
618 TCGv result
= tcg_temp_new_i32();
620 tcg_gen_movi_tl(t0
, 0);
621 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
622 /* Addition, carry and set C/V/SV bits */
623 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
624 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
626 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
627 tcg_gen_xor_tl(t0
, r1
, r2
);
628 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
630 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
631 /* Calc AV/SAV bits */
632 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
633 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
635 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
636 /* write back result */
637 tcg_gen_mov_tl(ret
, result
);
639 tcg_temp_free(result
);
641 tcg_temp_free(carry
);
644 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
646 TCGv temp
= tcg_const_i32(con
);
647 gen_addc_CC(ret
, r1
, temp
);
651 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
654 TCGv temp
= tcg_temp_new();
655 TCGv temp2
= tcg_temp_new();
656 TCGv result
= tcg_temp_new();
657 TCGv mask
= tcg_temp_new();
658 TCGv t0
= tcg_const_i32(0);
660 /* create mask for sticky bits */
661 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
662 tcg_gen_shli_tl(mask
, mask
, 31);
664 tcg_gen_add_tl(result
, r1
, r2
);
666 tcg_gen_xor_tl(temp
, result
, r1
);
667 tcg_gen_xor_tl(temp2
, r1
, r2
);
668 tcg_gen_andc_tl(temp
, temp
, temp2
);
669 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
671 tcg_gen_and_tl(temp
, temp
, mask
);
672 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
674 tcg_gen_add_tl(temp
, result
, result
);
675 tcg_gen_xor_tl(temp
, temp
, result
);
676 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
678 tcg_gen_and_tl(temp
, temp
, mask
);
679 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
680 /* write back result */
681 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r3
);
685 tcg_temp_free(temp2
);
686 tcg_temp_free(result
);
690 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
693 TCGv temp
= tcg_const_i32(r2
);
694 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
698 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
700 TCGv temp
= tcg_temp_new_i32();
701 TCGv result
= tcg_temp_new_i32();
703 tcg_gen_sub_tl(result
, r1
, r2
);
705 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
706 tcg_gen_xor_tl(temp
, r1
, r2
);
707 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
709 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
711 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
712 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
714 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
715 /* write back result */
716 tcg_gen_mov_tl(ret
, result
);
719 tcg_temp_free(result
);
722 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
724 TCGv temp
= tcg_temp_new_i32();
725 TCGv result
= tcg_temp_new_i32();
727 tcg_gen_sub_tl(result
, r1
, r2
);
728 tcg_gen_sub_tl(temp
, r2
, r1
);
729 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
732 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
733 tcg_gen_xor_tl(temp
, result
, r2
);
734 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
735 tcg_gen_xor_tl(temp
, r1
, r2
);
736 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
738 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
740 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
741 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
743 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
744 /* write back result */
745 tcg_gen_mov_tl(ret
, result
);
748 tcg_temp_free(result
);
751 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
753 TCGv temp
= tcg_const_i32(con
);
754 gen_absdif(ret
, r1
, temp
);
758 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
760 TCGv temp
= tcg_const_i32(con
);
761 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
765 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
767 TCGv high
= tcg_temp_new();
768 TCGv low
= tcg_temp_new();
770 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
771 tcg_gen_mov_tl(ret
, low
);
773 tcg_gen_sari_tl(low
, low
, 31);
774 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
775 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
777 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
779 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
780 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
782 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
788 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
790 TCGv temp
= tcg_const_i32(con
);
791 gen_mul_i32s(ret
, r1
, temp
);
795 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
797 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
799 tcg_gen_movi_tl(cpu_PSW_V
, 0);
801 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
803 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
804 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
806 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
809 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
812 TCGv temp
= tcg_const_i32(con
);
813 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
817 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
819 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
821 tcg_gen_movi_tl(cpu_PSW_V
, 0);
823 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
825 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
826 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
828 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
831 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
834 TCGv temp
= tcg_const_i32(con
);
835 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
839 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
841 TCGv temp
= tcg_const_i32(con
);
842 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
846 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
848 TCGv temp
= tcg_const_i32(con
);
849 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
852 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
853 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
855 TCGv temp
= tcg_const_i32(con
);
856 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
860 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
862 TCGv temp
= tcg_const_i32(con
);
863 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
868 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
871 TCGv temp
= tcg_const_i32(con
);
872 TCGv_i64 temp64
= tcg_temp_new_i64();
873 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
874 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, temp
);
875 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
877 tcg_temp_free_i64(temp64
);
881 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
884 TCGv temp
= tcg_const_i32(con
);
885 TCGv_i64 temp64
= tcg_temp_new_i64();
886 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
887 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, temp
);
888 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
890 tcg_temp_free_i64(temp64
);
893 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
895 TCGv temp
= tcg_const_i32(con
);
896 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
900 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
902 TCGv temp
= tcg_const_i32(con
);
903 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
908 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
911 TCGv temp
= tcg_const_i32(con
);
912 TCGv_i64 temp64
= tcg_temp_new_i64();
913 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
914 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, temp
);
915 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
917 tcg_temp_free_i64(temp64
);
921 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
924 TCGv temp
= tcg_const_i32(con
);
925 TCGv_i64 temp64
= tcg_temp_new_i64();
926 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
927 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, temp
);
928 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
930 tcg_temp_free_i64(temp64
);
933 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
935 TCGv sat_neg
= tcg_const_i32(low
);
936 TCGv temp
= tcg_const_i32(up
);
938 /* sat_neg = (arg < low ) ? low : arg; */
939 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
941 /* ret = (sat_neg > up ) ? up : sat_neg; */
942 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
944 tcg_temp_free(sat_neg
);
948 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
950 TCGv temp
= tcg_const_i32(up
);
951 /* sat_neg = (arg > up ) ? up : arg; */
952 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
956 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
958 if (shift_count
== -32) {
959 tcg_gen_movi_tl(ret
, 0);
960 } else if (shift_count
>= 0) {
961 tcg_gen_shli_tl(ret
, r1
, shift_count
);
963 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
967 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
969 TCGv temp_low
, temp_high
;
971 if (shiftcount
== -16) {
972 tcg_gen_movi_tl(ret
, 0);
974 temp_high
= tcg_temp_new();
975 temp_low
= tcg_temp_new();
977 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
978 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
979 gen_shi(temp_low
, temp_low
, shiftcount
);
980 gen_shi(ret
, temp_high
, shiftcount
);
981 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
983 tcg_temp_free(temp_low
);
984 tcg_temp_free(temp_high
);
988 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
990 uint32_t msk
, msk_start
;
991 TCGv temp
= tcg_temp_new();
992 TCGv temp2
= tcg_temp_new();
993 TCGv t_0
= tcg_const_i32(0);
995 if (shift_count
== 0) {
996 /* Clear PSW.C and PSW.V */
997 tcg_gen_movi_tl(cpu_PSW_C
, 0);
998 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
999 tcg_gen_mov_tl(ret
, r1
);
1000 } else if (shift_count
== -32) {
1002 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
1003 /* fill ret completly with sign bit */
1004 tcg_gen_sari_tl(ret
, r1
, 31);
1006 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1007 } else if (shift_count
> 0) {
1008 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
1009 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
1012 msk_start
= 32 - shift_count
;
1013 msk
= ((1 << shift_count
) - 1) << msk_start
;
1014 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
1015 /* calc v/sv bits */
1016 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
1017 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
1018 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
1019 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1021 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
1023 tcg_gen_shli_tl(ret
, r1
, shift_count
);
1025 tcg_temp_free(t_max
);
1026 tcg_temp_free(t_min
);
1029 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1031 msk
= (1 << -shift_count
) - 1;
1032 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
1034 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
1036 /* calc av overflow bit */
1037 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1038 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1039 /* calc sav overflow bit */
1040 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1042 tcg_temp_free(temp
);
1043 tcg_temp_free(temp2
);
1047 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
1049 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
1052 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
1054 TCGv temp
= tcg_const_i32(con
);
1055 gen_shas(ret
, r1
, temp
);
1056 tcg_temp_free(temp
);
1059 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
1063 if (shift_count
== 0) {
1064 tcg_gen_mov_tl(ret
, r1
);
1065 } else if (shift_count
> 0) {
1066 low
= tcg_temp_new();
1067 high
= tcg_temp_new();
1069 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
1070 tcg_gen_shli_tl(low
, r1
, shift_count
);
1071 tcg_gen_shli_tl(ret
, high
, shift_count
);
1072 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
1075 tcg_temp_free(high
);
1077 low
= tcg_temp_new();
1078 high
= tcg_temp_new();
1080 tcg_gen_ext16s_tl(low
, r1
);
1081 tcg_gen_sari_tl(low
, low
, -shift_count
);
1082 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
1083 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
1086 tcg_temp_free(high
);
1091 /* ret = {ret[30:0], (r1 cond r2)}; */
1092 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
1094 TCGv temp
= tcg_temp_new();
1095 TCGv temp2
= tcg_temp_new();
1097 tcg_gen_shli_tl(temp
, ret
, 1);
1098 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
1099 tcg_gen_or_tl(ret
, temp
, temp2
);
1101 tcg_temp_free(temp
);
1102 tcg_temp_free(temp2
);
1105 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
1107 TCGv temp
= tcg_const_i32(con
);
1108 gen_sh_cond(cond
, ret
, r1
, temp
);
1109 tcg_temp_free(temp
);
1112 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
1114 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
1117 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
1119 TCGv temp
= tcg_const_i32(con
);
1120 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
1121 tcg_temp_free(temp
);
1124 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
1126 TCGv temp
= tcg_const_i32(con
);
1127 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
1128 tcg_temp_free(temp
);
1131 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
1133 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
1136 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
1138 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
1141 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
1143 void(*op1
)(TCGv
, TCGv
, TCGv
),
1144 void(*op2
)(TCGv
, TCGv
, TCGv
))
1148 temp1
= tcg_temp_new();
1149 temp2
= tcg_temp_new();
1151 tcg_gen_shri_tl(temp2
, r2
, pos2
);
1152 tcg_gen_shri_tl(temp1
, r1
, pos1
);
1154 (*op1
)(temp1
, temp1
, temp2
);
1155 (*op2
)(temp1
, ret
, temp1
);
1157 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
1159 tcg_temp_free(temp1
);
1160 tcg_temp_free(temp2
);
1163 /* ret = r1[pos1] op1 r2[pos2]; */
1164 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
1166 void(*op1
)(TCGv
, TCGv
, TCGv
))
1170 temp1
= tcg_temp_new();
1171 temp2
= tcg_temp_new();
1173 tcg_gen_shri_tl(temp2
, r2
, pos2
);
1174 tcg_gen_shri_tl(temp1
, r1
, pos1
);
1176 (*op1
)(ret
, temp1
, temp2
);
1178 tcg_gen_andi_tl(ret
, ret
, 0x1);
1180 tcg_temp_free(temp1
);
1181 tcg_temp_free(temp2
);
1184 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
1185 void(*op
)(TCGv
, TCGv
, TCGv
))
1187 TCGv temp
= tcg_temp_new();
1188 TCGv temp2
= tcg_temp_new();
1189 /* temp = (arg1 cond arg2 )*/
1190 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
1192 tcg_gen_andi_tl(temp2
, ret
, 0x1);
1193 /* temp = temp insn temp2 */
1194 (*op
)(temp
, temp
, temp2
);
1195 /* ret = {ret[31:1], temp} */
1196 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
1198 tcg_temp_free(temp
);
1199 tcg_temp_free(temp2
);
1203 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
1204 void(*op
)(TCGv
, TCGv
, TCGv
))
1206 TCGv temp
= tcg_const_i32(con
);
1207 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
1208 tcg_temp_free(temp
);
1211 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
1213 TCGv b0
= tcg_temp_new();
1214 TCGv b1
= tcg_temp_new();
1215 TCGv b2
= tcg_temp_new();
1216 TCGv b3
= tcg_temp_new();
1219 tcg_gen_andi_tl(b0
, r1
, 0xff);
1220 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
1223 tcg_gen_andi_tl(b1
, r1
, 0xff00);
1224 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
1227 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
1228 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
1231 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
1232 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
1235 tcg_gen_or_tl(ret
, b0
, b1
);
1236 tcg_gen_or_tl(ret
, ret
, b2
);
1237 tcg_gen_or_tl(ret
, ret
, b3
);
1245 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
1247 TCGv h0
= tcg_temp_new();
1248 TCGv h1
= tcg_temp_new();
1251 tcg_gen_andi_tl(h0
, r1
, 0xffff);
1252 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
1255 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
1256 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
1259 tcg_gen_or_tl(ret
, h0
, h1
);
1264 /* mask = ((1 << width) -1) << pos;
1265 ret = (r1 & ~mask) | (r2 << pos) & mask); */
1266 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
1268 TCGv mask
= tcg_temp_new();
1269 TCGv temp
= tcg_temp_new();
1270 TCGv temp2
= tcg_temp_new();
1272 tcg_gen_movi_tl(mask
, 1);
1273 tcg_gen_shl_tl(mask
, mask
, width
);
1274 tcg_gen_subi_tl(mask
, mask
, 1);
1275 tcg_gen_shl_tl(mask
, mask
, pos
);
1277 tcg_gen_shl_tl(temp
, r2
, pos
);
1278 tcg_gen_and_tl(temp
, temp
, mask
);
1279 tcg_gen_andc_tl(temp2
, r1
, mask
);
1280 tcg_gen_or_tl(ret
, temp
, temp2
);
1282 tcg_temp_free(mask
);
1283 tcg_temp_free(temp
);
1284 tcg_temp_free(temp2
);
1287 /* helpers for generating program flow micro-ops */
1289 static inline void gen_save_pc(target_ulong pc
)
1291 tcg_gen_movi_tl(cpu_PC
, pc
);
1294 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
1296 TranslationBlock
*tb
;
1298 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
1299 likely(!ctx
->singlestep_enabled
)) {
1302 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
1305 if (ctx
->singlestep_enabled
) {
1306 /* raise exception debug */
1312 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
1313 TCGv r2
, int16_t address
)
1316 jumpLabel
= gen_new_label();
1317 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
1319 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
1321 gen_set_label(jumpLabel
);
1322 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
1325 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
1326 int r2
, int16_t address
)
1328 TCGv temp
= tcg_const_i32(r2
);
1329 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
1330 tcg_temp_free(temp
);
1333 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
1336 l1
= gen_new_label();
1338 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
1339 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
1340 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
1342 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
1345 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
1346 int r2
, int32_t constant
, int32_t offset
)
1352 /* SB-format jumps */
1355 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
1357 case OPC1_32_B_CALL
:
1358 case OPC1_16_SB_CALL
:
1359 gen_helper_1arg(call
, ctx
->next_pc
);
1360 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
1363 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
1365 case OPC1_16_SB_JNZ
:
1366 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
1368 /* SBC-format jumps */
1369 case OPC1_16_SBC_JEQ
:
1370 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
1372 case OPC1_16_SBC_JNE
:
1373 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
1375 /* SBRN-format jumps */
1376 case OPC1_16_SBRN_JZ_T
:
1377 temp
= tcg_temp_new();
1378 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
1379 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
1380 tcg_temp_free(temp
);
1382 case OPC1_16_SBRN_JNZ_T
:
1383 temp
= tcg_temp_new();
1384 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
1385 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
1386 tcg_temp_free(temp
);
1388 /* SBR-format jumps */
1389 case OPC1_16_SBR_JEQ
:
1390 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
1393 case OPC1_16_SBR_JNE
:
1394 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
1397 case OPC1_16_SBR_JNZ
:
1398 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
1400 case OPC1_16_SBR_JNZ_A
:
1401 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
1403 case OPC1_16_SBR_JGEZ
:
1404 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
1406 case OPC1_16_SBR_JGTZ
:
1407 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
1409 case OPC1_16_SBR_JLEZ
:
1410 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
1412 case OPC1_16_SBR_JLTZ
:
1413 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
1415 case OPC1_16_SBR_JZ
:
1416 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
1418 case OPC1_16_SBR_JZ_A
:
1419 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
1421 case OPC1_16_SBR_LOOP
:
1422 gen_loop(ctx
, r1
, offset
* 2 - 32);
1424 /* SR-format jumps */
1426 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
1429 case OPC2_16_SR_RET
:
1430 gen_helper_ret(cpu_env
);
1434 case OPC1_32_B_CALLA
:
1435 gen_helper_1arg(call
, ctx
->next_pc
);
1436 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
1439 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
1441 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
1444 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
1445 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
1448 case OPCM_32_BRC_EQ_NEQ
:
1449 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
1450 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
1452 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
1455 case OPCM_32_BRC_GE
:
1456 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
1457 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
1459 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
1460 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
1464 case OPCM_32_BRC_JLT
:
1465 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
1466 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
1468 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
1469 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
1473 case OPCM_32_BRC_JNE
:
1474 temp
= tcg_temp_new();
1475 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
1476 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
1477 /* subi is unconditional */
1478 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
1479 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
1481 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
1482 /* addi is unconditional */
1483 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
1484 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
1486 tcg_temp_free(temp
);
1489 case OPCM_32_BRN_JTT
:
1490 n
= MASK_OP_BRN_N(ctx
->opcode
);
1492 temp
= tcg_temp_new();
1493 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
1495 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
1496 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
1498 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
1500 tcg_temp_free(temp
);
1503 case OPCM_32_BRR_EQ_NEQ
:
1504 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
1505 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1508 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1512 case OPCM_32_BRR_ADDR_EQ_NEQ
:
1513 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
1514 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
1517 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
1521 case OPCM_32_BRR_GE
:
1522 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
1523 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1526 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1530 case OPCM_32_BRR_JLT
:
1531 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
1532 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1535 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1539 case OPCM_32_BRR_LOOP
:
1540 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
1541 gen_loop(ctx
, r1
, offset
* 2);
1543 /* OPC2_32_BRR_LOOPU */
1544 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
1547 case OPCM_32_BRR_JNE
:
1548 temp
= tcg_temp_new();
1549 temp2
= tcg_temp_new();
1550 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
1551 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
1552 /* also save r2, in case of r1 == r2, so r2 is not decremented */
1553 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
1554 /* subi is unconditional */
1555 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
1556 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
1558 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
1559 /* also save r2, in case of r1 == r2, so r2 is not decremented */
1560 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
1561 /* addi is unconditional */
1562 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
1563 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
1565 tcg_temp_free(temp
);
1566 tcg_temp_free(temp2
);
1568 case OPCM_32_BRR_JNZ
:
1569 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
1570 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
1572 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
1576 printf("Branch Error at %x\n", ctx
->pc
);
1578 ctx
->bstate
= BS_BRANCH
;
1583 * Functions for decoding instructions
1586 static void decode_src_opc(DisasContext
*ctx
, int op1
)
1592 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
1593 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
1596 case OPC1_16_SRC_ADD
:
1597 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
1599 case OPC1_16_SRC_ADD_A15
:
1600 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
1602 case OPC1_16_SRC_ADD_15A
:
1603 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
1605 case OPC1_16_SRC_ADD_A
:
1606 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
1608 case OPC1_16_SRC_CADD
:
1609 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
1612 case OPC1_16_SRC_CADDN
:
1613 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
1616 case OPC1_16_SRC_CMOV
:
1617 temp
= tcg_const_tl(0);
1618 temp2
= tcg_const_tl(const4
);
1619 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
1620 temp2
, cpu_gpr_d
[r1
]);
1621 tcg_temp_free(temp
);
1622 tcg_temp_free(temp2
);
1624 case OPC1_16_SRC_CMOVN
:
1625 temp
= tcg_const_tl(0);
1626 temp2
= tcg_const_tl(const4
);
1627 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
1628 temp2
, cpu_gpr_d
[r1
]);
1629 tcg_temp_free(temp
);
1630 tcg_temp_free(temp2
);
1632 case OPC1_16_SRC_EQ
:
1633 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
1636 case OPC1_16_SRC_LT
:
1637 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
1640 case OPC1_16_SRC_MOV
:
1641 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
1643 case OPC1_16_SRC_MOV_A
:
1644 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
1645 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
1647 case OPC1_16_SRC_SH
:
1648 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
1650 case OPC1_16_SRC_SHA
:
1651 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
1656 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
1661 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
1662 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
1665 case OPC1_16_SRR_ADD
:
1666 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1668 case OPC1_16_SRR_ADD_A15
:
1669 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
1671 case OPC1_16_SRR_ADD_15A
:
1672 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1674 case OPC1_16_SRR_ADD_A
:
1675 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
1677 case OPC1_16_SRR_ADDS
:
1678 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1680 case OPC1_16_SRR_AND
:
1681 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1683 case OPC1_16_SRR_CMOV
:
1684 temp
= tcg_const_tl(0);
1685 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
1686 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
1687 tcg_temp_free(temp
);
1689 case OPC1_16_SRR_CMOVN
:
1690 temp
= tcg_const_tl(0);
1691 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
1692 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
1693 tcg_temp_free(temp
);
1695 case OPC1_16_SRR_EQ
:
1696 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
1699 case OPC1_16_SRR_LT
:
1700 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
1703 case OPC1_16_SRR_MOV
:
1704 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1706 case OPC1_16_SRR_MOV_A
:
1707 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
1709 case OPC1_16_SRR_MOV_AA
:
1710 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
1712 case OPC1_16_SRR_MOV_D
:
1713 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
1715 case OPC1_16_SRR_MUL
:
1716 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1718 case OPC1_16_SRR_OR
:
1719 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1721 case OPC1_16_SRR_SUB
:
1722 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1724 case OPC1_16_SRR_SUB_A15B
:
1725 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
1727 case OPC1_16_SRR_SUB_15AB
:
1728 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1730 case OPC1_16_SRR_SUBS
:
1731 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1733 case OPC1_16_SRR_XOR
:
1734 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1739 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
1743 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
1744 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
1747 case OPC1_16_SSR_ST_A
:
1748 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
1750 case OPC1_16_SSR_ST_A_POSTINC
:
1751 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
1752 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
1754 case OPC1_16_SSR_ST_B
:
1755 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
1757 case OPC1_16_SSR_ST_B_POSTINC
:
1758 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
1759 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
1761 case OPC1_16_SSR_ST_H
:
1762 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
1764 case OPC1_16_SSR_ST_H_POSTINC
:
1765 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
1766 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
1768 case OPC1_16_SSR_ST_W
:
1769 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
1771 case OPC1_16_SSR_ST_W_POSTINC
:
1772 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
1773 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
1778 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
1782 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
1785 case OPC1_16_SC_AND
:
1786 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
1788 case OPC1_16_SC_BISR
:
1789 gen_helper_1arg(bisr
, const16
& 0xff);
1791 case OPC1_16_SC_LD_A
:
1792 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
1794 case OPC1_16_SC_LD_W
:
1795 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
1797 case OPC1_16_SC_MOV
:
1798 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
1801 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
1803 case OPC1_16_SC_ST_A
:
1804 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
1806 case OPC1_16_SC_ST_W
:
1807 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
1809 case OPC1_16_SC_SUB_A
:
1810 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
1815 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
1819 r1
= MASK_OP_SLR_D(ctx
->opcode
);
1820 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
1824 case OPC1_16_SLR_LD_A
:
1825 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
1827 case OPC1_16_SLR_LD_A_POSTINC
:
1828 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
1829 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
1831 case OPC1_16_SLR_LD_BU
:
1832 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
1834 case OPC1_16_SLR_LD_BU_POSTINC
:
1835 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
1836 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
1838 case OPC1_16_SLR_LD_H
:
1839 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
1841 case OPC1_16_SLR_LD_H_POSTINC
:
1842 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
1843 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
1845 case OPC1_16_SLR_LD_W
:
1846 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
1848 case OPC1_16_SLR_LD_W_POSTINC
:
1849 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
1850 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
1855 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
1860 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
1861 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
1865 case OPC1_16_SRO_LD_A
:
1866 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
1868 case OPC1_16_SRO_LD_BU
:
1869 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
1871 case OPC1_16_SRO_LD_H
:
1872 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
1874 case OPC1_16_SRO_LD_W
:
1875 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
1877 case OPC1_16_SRO_ST_A
:
1878 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
1880 case OPC1_16_SRO_ST_B
:
1881 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
1883 case OPC1_16_SRO_ST_H
:
1884 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
1886 case OPC1_16_SRO_ST_W
:
1887 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
1892 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
1895 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
1898 case OPC2_16_SR_NOP
:
1900 case OPC2_16_SR_RET
:
1901 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
1903 case OPC2_16_SR_RFE
:
1904 gen_helper_rfe(cpu_env
);
1906 ctx
->bstate
= BS_BRANCH
;
1908 case OPC2_16_SR_DEBUG
:
1909 /* raise EXCP_DEBUG */
1914 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
1920 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
1921 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
1924 case OPC2_16_SR_RSUB
:
1925 /* overflow only if r1 = -0x80000000 */
1926 temp
= tcg_const_i32(-0x80000000);
1928 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
1929 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1931 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1933 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
1935 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
1936 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
1938 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1939 tcg_temp_free(temp
);
1941 case OPC2_16_SR_SAT_B
:
1942 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
1944 case OPC2_16_SR_SAT_BU
:
1945 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
1947 case OPC2_16_SR_SAT_H
:
1948 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
1950 case OPC2_16_SR_SAT_HU
:
1951 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
1956 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
1964 op1
= MASK_OP_MAJOR(ctx
->opcode
);
1966 /* handle ADDSC.A opcode only being 6 bit long */
1967 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
1968 op1
= OPC1_16_SRRS_ADDSC_A
;
1972 case OPC1_16_SRC_ADD
:
1973 case OPC1_16_SRC_ADD_A15
:
1974 case OPC1_16_SRC_ADD_15A
:
1975 case OPC1_16_SRC_ADD_A
:
1976 case OPC1_16_SRC_CADD
:
1977 case OPC1_16_SRC_CADDN
:
1978 case OPC1_16_SRC_CMOV
:
1979 case OPC1_16_SRC_CMOVN
:
1980 case OPC1_16_SRC_EQ
:
1981 case OPC1_16_SRC_LT
:
1982 case OPC1_16_SRC_MOV
:
1983 case OPC1_16_SRC_MOV_A
:
1984 case OPC1_16_SRC_SH
:
1985 case OPC1_16_SRC_SHA
:
1986 decode_src_opc(ctx
, op1
);
1989 case OPC1_16_SRR_ADD
:
1990 case OPC1_16_SRR_ADD_A15
:
1991 case OPC1_16_SRR_ADD_15A
:
1992 case OPC1_16_SRR_ADD_A
:
1993 case OPC1_16_SRR_ADDS
:
1994 case OPC1_16_SRR_AND
:
1995 case OPC1_16_SRR_CMOV
:
1996 case OPC1_16_SRR_CMOVN
:
1997 case OPC1_16_SRR_EQ
:
1998 case OPC1_16_SRR_LT
:
1999 case OPC1_16_SRR_MOV
:
2000 case OPC1_16_SRR_MOV_A
:
2001 case OPC1_16_SRR_MOV_AA
:
2002 case OPC1_16_SRR_MOV_D
:
2003 case OPC1_16_SRR_MUL
:
2004 case OPC1_16_SRR_OR
:
2005 case OPC1_16_SRR_SUB
:
2006 case OPC1_16_SRR_SUB_A15B
:
2007 case OPC1_16_SRR_SUB_15AB
:
2008 case OPC1_16_SRR_SUBS
:
2009 case OPC1_16_SRR_XOR
:
2010 decode_srr_opc(ctx
, op1
);
2013 case OPC1_16_SSR_ST_A
:
2014 case OPC1_16_SSR_ST_A_POSTINC
:
2015 case OPC1_16_SSR_ST_B
:
2016 case OPC1_16_SSR_ST_B_POSTINC
:
2017 case OPC1_16_SSR_ST_H
:
2018 case OPC1_16_SSR_ST_H_POSTINC
:
2019 case OPC1_16_SSR_ST_W
:
2020 case OPC1_16_SSR_ST_W_POSTINC
:
2021 decode_ssr_opc(ctx
, op1
);
2024 case OPC1_16_SRRS_ADDSC_A
:
2025 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
2026 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
2027 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
2028 temp
= tcg_temp_new();
2029 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
2030 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
2031 tcg_temp_free(temp
);
2034 case OPC1_16_SLRO_LD_A
:
2035 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2036 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2037 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2039 case OPC1_16_SLRO_LD_BU
:
2040 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2041 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2042 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
2044 case OPC1_16_SLRO_LD_H
:
2045 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2046 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2047 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
2049 case OPC1_16_SLRO_LD_W
:
2050 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2051 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2052 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2055 case OPC1_16_SB_CALL
:
2057 case OPC1_16_SB_JNZ
:
2059 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
2060 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
2063 case OPC1_16_SBC_JEQ
:
2064 case OPC1_16_SBC_JNE
:
2065 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
2066 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
2067 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
2070 case OPC1_16_SBRN_JNZ_T
:
2071 case OPC1_16_SBRN_JZ_T
:
2072 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
2073 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
2074 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
2077 case OPC1_16_SBR_JEQ
:
2078 case OPC1_16_SBR_JGEZ
:
2079 case OPC1_16_SBR_JGTZ
:
2080 case OPC1_16_SBR_JLEZ
:
2081 case OPC1_16_SBR_JLTZ
:
2082 case OPC1_16_SBR_JNE
:
2083 case OPC1_16_SBR_JNZ
:
2084 case OPC1_16_SBR_JNZ_A
:
2085 case OPC1_16_SBR_JZ
:
2086 case OPC1_16_SBR_JZ_A
:
2087 case OPC1_16_SBR_LOOP
:
2088 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
2089 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
2090 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
2093 case OPC1_16_SC_AND
:
2094 case OPC1_16_SC_BISR
:
2095 case OPC1_16_SC_LD_A
:
2096 case OPC1_16_SC_LD_W
:
2097 case OPC1_16_SC_MOV
:
2099 case OPC1_16_SC_ST_A
:
2100 case OPC1_16_SC_ST_W
:
2101 case OPC1_16_SC_SUB_A
:
2102 decode_sc_opc(ctx
, op1
);
2105 case OPC1_16_SLR_LD_A
:
2106 case OPC1_16_SLR_LD_A_POSTINC
:
2107 case OPC1_16_SLR_LD_BU
:
2108 case OPC1_16_SLR_LD_BU_POSTINC
:
2109 case OPC1_16_SLR_LD_H
:
2110 case OPC1_16_SLR_LD_H_POSTINC
:
2111 case OPC1_16_SLR_LD_W
:
2112 case OPC1_16_SLR_LD_W_POSTINC
:
2113 decode_slr_opc(ctx
, op1
);
2116 case OPC1_16_SRO_LD_A
:
2117 case OPC1_16_SRO_LD_BU
:
2118 case OPC1_16_SRO_LD_H
:
2119 case OPC1_16_SRO_LD_W
:
2120 case OPC1_16_SRO_ST_A
:
2121 case OPC1_16_SRO_ST_B
:
2122 case OPC1_16_SRO_ST_H
:
2123 case OPC1_16_SRO_ST_W
:
2124 decode_sro_opc(ctx
, op1
);
2127 case OPC1_16_SSRO_ST_A
:
2128 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2129 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2130 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2132 case OPC1_16_SSRO_ST_B
:
2133 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2134 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2135 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
2137 case OPC1_16_SSRO_ST_H
:
2138 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2139 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2140 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
2142 case OPC1_16_SSRO_ST_W
:
2143 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2144 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2145 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2148 case OPCM_16_SR_SYSTEM
:
2149 decode_sr_system(env
, ctx
);
2151 case OPCM_16_SR_ACCU
:
2152 decode_sr_accu(env
, ctx
);
2155 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
2156 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
2158 case OPC1_16_SR_NOT
:
2159 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
2160 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
2166 * 32 bit instructions
2170 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
2177 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2178 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2179 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2181 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2184 case OPC2_32_ABS_LD_A
:
2185 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
2187 case OPC2_32_ABS_LD_D
:
2188 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
2190 case OPC2_32_ABS_LD_DA
:
2191 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
2193 case OPC2_32_ABS_LD_W
:
2194 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
2198 tcg_temp_free(temp
);
2201 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
2208 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2209 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2210 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2212 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2215 case OPC2_32_ABS_LD_B
:
2216 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
2218 case OPC2_32_ABS_LD_BU
:
2219 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
2221 case OPC2_32_ABS_LD_H
:
2222 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
2224 case OPC2_32_ABS_LD_HU
:
2225 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
2229 tcg_temp_free(temp
);
2232 static void decode_abs_ldst_swap(CPUTriCoreState
*env
, DisasContext
*ctx
)
2239 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2240 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2241 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2243 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2246 case OPC2_32_ABS_LDMST
:
2247 gen_ldmst(ctx
, r1
, temp
);
2249 case OPC2_32_ABS_SWAP_W
:
2250 gen_swap(ctx
, r1
, temp
);
2254 tcg_temp_free(temp
);
2257 static void decode_abs_ldst_context(CPUTriCoreState
*env
, DisasContext
*ctx
)
2262 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2263 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2266 case OPC2_32_ABS_LDLCX
:
2267 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
2269 case OPC2_32_ABS_LDUCX
:
2270 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
2272 case OPC2_32_ABS_STLCX
:
2273 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
2275 case OPC2_32_ABS_STUCX
:
2276 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
2281 static void decode_abs_store(CPUTriCoreState
*env
, DisasContext
*ctx
)
2288 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2289 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2290 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2292 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2295 case OPC2_32_ABS_ST_A
:
2296 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
2298 case OPC2_32_ABS_ST_D
:
2299 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
2301 case OPC2_32_ABS_ST_DA
:
2302 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
2304 case OPC2_32_ABS_ST_W
:
2305 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
2309 tcg_temp_free(temp
);
2312 static void decode_abs_storeb_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
2319 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2320 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2321 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2323 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2326 case OPC2_32_ABS_ST_B
:
2327 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
2329 case OPC2_32_ABS_ST_H
:
2330 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
2333 tcg_temp_free(temp
);
2338 static void decode_bit_andacc(CPUTriCoreState
*env
, DisasContext
*ctx
)
2344 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2345 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2346 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2347 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2348 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2349 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2353 case OPC2_32_BIT_AND_AND_T
:
2354 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2355 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
2357 case OPC2_32_BIT_AND_ANDN_T
:
2358 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2359 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
2361 case OPC2_32_BIT_AND_NOR_T
:
2362 if (TCG_TARGET_HAS_andc_i32
) {
2363 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2364 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
2366 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2367 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
2370 case OPC2_32_BIT_AND_OR_T
:
2371 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2372 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
2377 static void decode_bit_logical_t(CPUTriCoreState
*env
, DisasContext
*ctx
)
2382 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2383 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2384 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2385 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2386 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2387 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2390 case OPC2_32_BIT_AND_T
:
2391 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2392 pos1
, pos2
, &tcg_gen_and_tl
);
2394 case OPC2_32_BIT_ANDN_T
:
2395 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2396 pos1
, pos2
, &tcg_gen_andc_tl
);
2398 case OPC2_32_BIT_NOR_T
:
2399 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2400 pos1
, pos2
, &tcg_gen_nor_tl
);
2402 case OPC2_32_BIT_OR_T
:
2403 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2404 pos1
, pos2
, &tcg_gen_or_tl
);
/*
 * Decode BIT-format insert instructions (INS.T / INSN.T):
 * copy bit pos2 of D[r2] (inverted for INSN.T) into bit pos1 of D[r1],
 * writing the result to D[r3] with a 1-bit deposit.
 * NOTE(review): extraction dropped structural lines (declarations, braces);
 * code left byte-identical.
 */
2409 static void decode_bit_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
2415 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2416 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2417 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2418 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2419 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2420 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2422 temp
= tcg_temp_new();
/* Move the source bit to bit 0; only bit 0 matters for the deposit below. */
2424 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
/* INSN.T inserts the complement of the source bit. */
2425 if (op2
== OPC2_32_BIT_INSN_T
) {
2426 tcg_gen_not_tl(temp
, temp
);
/* Deposit one bit of temp at position pos1 of D[r1] into D[r3]. */
2428 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
2429 tcg_temp_free(temp
);
/*
 * Decode BIT-format single-bit logic instructions, second group
 * (NAND.T, ORN.T, XNOR.T, XOR.T): D[r3] = bit(D[r1], pos1) <op>
 * bit(D[r2], pos2), via gen_bit_1op.
 * NOTE(review): extraction dropped structural lines (declarations,
 * switch header, breaks, braces); code left byte-identical.
 */
2432 static void decode_bit_logical_t2(CPUTriCoreState
*env
, DisasContext
*ctx
)
2439 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2440 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2441 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2442 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2443 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2444 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
/* NAND.T: negated AND of the two selected bits. */
2447 case OPC2_32_BIT_NAND_T
:
2448 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2449 pos1
, pos2
, &tcg_gen_nand_tl
);
/* ORN.T: OR with the second bit complemented. */
2451 case OPC2_32_BIT_ORN_T
:
2452 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2453 pos1
, pos2
, &tcg_gen_orc_tl
);
/* XNOR.T: equivalence (negated XOR) of the two selected bits. */
2455 case OPC2_32_BIT_XNOR_T
:
2456 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2457 pos1
, pos2
, &tcg_gen_eqv_tl
);
/* XOR.T: exclusive OR of the two selected bits. */
2459 case OPC2_32_BIT_XOR_T
:
2460 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2461 pos1
, pos2
, &tcg_gen_xor_tl
);
/*
 * Decode BIT-format accumulating OR instructions (OR.AND.T, OR.ANDN.T,
 * OR.NOR.T, OR.OR.T): combine two selected bits with an inner op, then OR
 * the result into bit 0 of D[r3], via gen_bit_2op(inner_op, outer_op).
 * OR.NOR.T is folded to or/orc when the TCG backend has orc, else nor/or.
 * NOTE(review): extraction dropped structural lines (declarations, switch
 * header, breaks, braces); code left byte-identical.
 */
2466 static void decode_bit_orand(CPUTriCoreState
*env
, DisasContext
*ctx
)
2473 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2474 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2475 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2476 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2477 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2478 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2481 case OPC2_32_BIT_OR_AND_T
:
2482 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2483 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
2485 case OPC2_32_BIT_OR_ANDN_T
:
2486 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2487 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
2489 case OPC2_32_BIT_OR_NOR_T
:
/* Prefer a single orc when the host TCG backend supports it. */
2490 if (TCG_TARGET_HAS_orc_i32
) {
2491 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2492 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
2494 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2495 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
2498 case OPC2_32_BIT_OR_OR_T
:
2499 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2500 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
/*
 * Decode BIT-format shift-accumulate logic instructions, first group
 * (SH.AND.T, SH.ANDN.T, SH.NOR.T, SH.OR.T): compute the 1-bit logic result
 * of the two selected bits into a temp, then D[r3] = (D[r3] << 1) + temp.
 * NOTE(review): extraction dropped structural lines (declarations, switch
 * header, breaks, braces); code left byte-identical.
 */
2505 static void decode_bit_sh_logic1(CPUTriCoreState
*env
, DisasContext
*ctx
)
2512 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2513 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2514 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2515 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2516 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2517 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2519 temp
= tcg_temp_new();
2522 case OPC2_32_BIT_SH_AND_T
:
2523 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2524 pos1
, pos2
, &tcg_gen_and_tl
);
2526 case OPC2_32_BIT_SH_ANDN_T
:
2527 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2528 pos1
, pos2
, &tcg_gen_andc_tl
);
2530 case OPC2_32_BIT_SH_NOR_T
:
2531 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2532 pos1
, pos2
, &tcg_gen_nor_tl
);
2534 case OPC2_32_BIT_SH_OR_T
:
2535 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2536 pos1
, pos2
, &tcg_gen_or_tl
);
/* Shift the accumulator left and add the freshly computed bit. */
2539 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
2540 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
2541 tcg_temp_free(temp
);
/*
 * Decode BIT-format shift-accumulate logic instructions, second group
 * (SH.NAND.T, SH.ORN.T, SH.XNOR.T, SH.XOR.T): compute the 1-bit logic
 * result into a temp, then D[r3] = (D[r3] << 1) + temp.
 * NOTE(review): extraction dropped structural lines (declarations, switch
 * header, breaks, braces); code left byte-identical.
 */
2544 static void decode_bit_sh_logic2(CPUTriCoreState
*env
, DisasContext
*ctx
)
2551 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2552 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2553 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2554 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2555 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2556 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2558 temp
= tcg_temp_new();
2561 case OPC2_32_BIT_SH_NAND_T
:
2562 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
2563 pos1
, pos2
, &tcg_gen_nand_tl
);
2565 case OPC2_32_BIT_SH_ORN_T
:
2566 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2567 pos1
, pos2
, &tcg_gen_orc_tl
);
2569 case OPC2_32_BIT_SH_XNOR_T
:
2570 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2571 pos1
, pos2
, &tcg_gen_eqv_tl
);
2573 case OPC2_32_BIT_SH_XOR_T
:
2574 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2575 pos1
, pos2
, &tcg_gen_xor_tl
);
/* Shift the accumulator left and add the freshly computed bit. */
2578 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
2579 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
2580 tcg_temp_free(temp
);
/*
 * Decode BO-format store and cache instructions with short-offset,
 * post-increment and pre-increment addressing modes.
 * r2 (MASK_OP_BO_S2) selects the BASE ADDRESS register A[r2]; r1 selects
 * the source register; off10 is the sign-extended 10-bit offset.
 *
 * FIX: the CACHEA/CACHEI post-/pre-increment cases previously updated
 * cpu_gpr_d[r2] (a data register). The base register for BO addressing is
 * an address register, so the increment must be applied to cpu_gpr_a[r2],
 * exactly as every ST.* case in this function does. (Matches the upstream
 * QEMU fix for CACHEA/CACHEI base-register handling.)
 *
 * NOTE(review): extraction dropped structural lines (switch header,
 * breaks, braces); apart from the cpu_gpr_d -> cpu_gpr_a fix, the code is
 * byte-identical.
 */
2586 static void decode_bo_addrmode_post_pre_base(CPUTriCoreState
*env
,
2594 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
2595 r2
= MASK_OP_BO_S2(ctx
->opcode
);
2596 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
2597 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
2600 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
2601 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
2602 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
2603 /* instruction to access the cache */
2605 case OPC2_32_BO_CACHEA_WI_POSTINC
:
2606 case OPC2_32_BO_CACHEA_W_POSTINC
:
2607 case OPC2_32_BO_CACHEA_I_POSTINC
:
2608 /* instruction to access the cache, but we still need to handle
2609 the addressing mode */
/* base register is A[r2], not D[r2] */
2610 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2612 case OPC2_32_BO_CACHEA_WI_PREINC
:
2613 case OPC2_32_BO_CACHEA_W_PREINC
:
2614 case OPC2_32_BO_CACHEA_I_PREINC
:
2615 /* instruction to access the cache, but we still need to handle
2616 the addressing mode */
/* base register is A[r2], not D[r2] */
2617 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2619 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
2620 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
2621 /* TODO: Raise illegal opcode trap,
2622 if !tricore_feature(TRICORE_FEATURE_131) */
2624 case OPC2_32_BO_CACHEI_W_POSTINC
:
2625 case OPC2_32_BO_CACHEI_WI_POSTINC
:
2626 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
/* base register is A[r2], not D[r2] */
2627 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2628 } /* TODO: else raise illegal opcode trap */
2630 case OPC2_32_BO_CACHEI_W_PREINC
:
2631 case OPC2_32_BO_CACHEI_WI_PREINC
:
2632 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
/* base register is A[r2], not D[r2] */
2633 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2634 } /* TODO: else raise illegal opcode trap */
2636 case OPC2_32_BO_ST_A_SHORTOFF
:
2637 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
2639 case OPC2_32_BO_ST_A_POSTINC
:
2640 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2642 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2644 case OPC2_32_BO_ST_A_PREINC
:
2645 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
2647 case OPC2_32_BO_ST_B_SHORTOFF
:
2648 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
2650 case OPC2_32_BO_ST_B_POSTINC
:
2651 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2653 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2655 case OPC2_32_BO_ST_B_PREINC
:
2656 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
2658 case OPC2_32_BO_ST_D_SHORTOFF
:
2659 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
2662 case OPC2_32_BO_ST_D_POSTINC
:
2663 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
2664 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2666 case OPC2_32_BO_ST_D_PREINC
:
2667 temp
= tcg_temp_new();
2668 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2669 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
2670 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
2671 tcg_temp_free(temp
);
2673 case OPC2_32_BO_ST_DA_SHORTOFF
:
2674 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
2677 case OPC2_32_BO_ST_DA_POSTINC
:
2678 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
2679 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2681 case OPC2_32_BO_ST_DA_PREINC
:
2682 temp
= tcg_temp_new();
2683 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2684 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
2685 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
2686 tcg_temp_free(temp
);
2688 case OPC2_32_BO_ST_H_SHORTOFF
:
2689 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2691 case OPC2_32_BO_ST_H_POSTINC
:
2692 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2694 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2696 case OPC2_32_BO_ST_H_PREINC
:
2697 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2699 case OPC2_32_BO_ST_Q_SHORTOFF
:
2700 temp
= tcg_temp_new();
/* ST.Q stores the upper halfword of D[r1]. */
2701 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
2702 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2703 tcg_temp_free(temp
);
2705 case OPC2_32_BO_ST_Q_POSTINC
:
2706 temp
= tcg_temp_new();
2707 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
2708 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
2710 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2711 tcg_temp_free(temp
);
2713 case OPC2_32_BO_ST_Q_PREINC
:
2714 temp
= tcg_temp_new();
2715 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
2716 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2717 tcg_temp_free(temp
);
2719 case OPC2_32_BO_ST_W_SHORTOFF
:
2720 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
2722 case OPC2_32_BO_ST_W_POSTINC
:
2723 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2725 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2727 case OPC2_32_BO_ST_W_PREINC
:
2728 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
/*
 * Decode BO-format store instructions with bit-reverse and circular
 * addressing modes. A[r2] holds the base, A[r2+1] holds index/length;
 * the effective address is A[r2] + zext16(A[r2+1]). After the access,
 * gen_helper_br_update (bit-reverse) or gen_helper_circ_update (circular,
 * with off10 as the increment in temp3) advances the index register.
 * The 64-bit circular cases store the second word at (index + 4) mod
 * length before updating.
 * NOTE(review): extraction dropped structural lines (switch header,
 * breaks, braces); code left byte-identical.
 */
2733 static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState
*env
,
2739 TCGv temp
, temp2
, temp3
;
2741 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
2742 r2
= MASK_OP_BO_S2(ctx
->opcode
);
2743 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
2744 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
2746 temp
= tcg_temp_new();
2747 temp2
= tcg_temp_new();
2748 temp3
= tcg_const_i32(off10
);
/* effective address = A[r2] + zero-extended low half of A[r2+1] */
2750 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
2751 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
2754 case OPC2_32_BO_CACHEA_WI_BR
:
2755 case OPC2_32_BO_CACHEA_W_BR
:
2756 case OPC2_32_BO_CACHEA_I_BR
:
2757 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2759 case OPC2_32_BO_CACHEA_WI_CIRC
:
2760 case OPC2_32_BO_CACHEA_W_CIRC
:
2761 case OPC2_32_BO_CACHEA_I_CIRC
:
2762 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2764 case OPC2_32_BO_ST_A_BR
:
2765 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2766 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2768 case OPC2_32_BO_ST_A_CIRC
:
2769 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2770 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2772 case OPC2_32_BO_ST_B_BR
:
2773 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
2774 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2776 case OPC2_32_BO_ST_B_CIRC
:
2777 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
2778 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2780 case OPC2_32_BO_ST_D_BR
:
2781 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
2782 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2784 case OPC2_32_BO_ST_D_CIRC
:
/* store low word, then high word at (index + 4) mod buffer length */
2785 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2786 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
2787 tcg_gen_addi_tl(temp
, temp
, 4);
2788 tcg_gen_rem_tl(temp
, temp
, temp2
);
2789 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
2790 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
2791 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2793 case OPC2_32_BO_ST_DA_BR
:
2794 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
2795 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2797 case OPC2_32_BO_ST_DA_CIRC
:
2798 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2799 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
2800 tcg_gen_addi_tl(temp
, temp
, 4);
2801 tcg_gen_rem_tl(temp
, temp
, temp2
);
2802 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
2803 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
2804 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2806 case OPC2_32_BO_ST_H_BR
:
2807 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
2808 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2810 case OPC2_32_BO_ST_H_CIRC
:
2811 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
2812 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2814 case OPC2_32_BO_ST_Q_BR
:
/* ST.Q stores the upper halfword of D[r1]. */
2815 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
2816 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
2817 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2819 case OPC2_32_BO_ST_Q_CIRC
:
2820 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
2821 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
2822 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2824 case OPC2_32_BO_ST_W_BR
:
2825 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2826 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2828 case OPC2_32_BO_ST_W_CIRC
:
2829 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2830 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2833 tcg_temp_free(temp
);
2834 tcg_temp_free(temp2
);
2835 tcg_temp_free(temp3
);
/*
 * Decode BO-format load instructions with short-offset, post-increment and
 * pre-increment addressing modes. A[r2] is the base address register,
 * off10 the sign-extended 10-bit offset, r1 the destination register.
 *
 * FIX: OPC2_32_BO_LD_BU_PREINC used MO_SB (sign-extending byte load).
 * LD.BU is an UNSIGNED byte load and must zero-extend, like the SHORTOFF
 * and POSTINC variants of the same instruction (MO_UB). (Matches the
 * upstream QEMU fix.)
 *
 * NOTE(review): extraction dropped structural lines (switch header,
 * breaks, braces); apart from the MO_SB -> MO_UB fix, the code is
 * byte-identical.
 */
2838 static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState
*env
,
2846 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
2847 r2
= MASK_OP_BO_S2(ctx
->opcode
);
2848 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
2849 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
2852 case OPC2_32_BO_LD_A_SHORTOFF
:
2853 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
2855 case OPC2_32_BO_LD_A_POSTINC
:
2856 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2858 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2860 case OPC2_32_BO_LD_A_PREINC
:
2861 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
2863 case OPC2_32_BO_LD_B_SHORTOFF
:
2864 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
2866 case OPC2_32_BO_LD_B_POSTINC
:
2867 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2869 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2871 case OPC2_32_BO_LD_B_PREINC
:
2872 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
2874 case OPC2_32_BO_LD_BU_SHORTOFF
:
2875 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
2877 case OPC2_32_BO_LD_BU_POSTINC
:
2878 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2880 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2882 case OPC2_32_BO_LD_BU_PREINC
:
/* LD.BU zero-extends: MO_UB, not MO_SB */
2883 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
2885 case OPC2_32_BO_LD_D_SHORTOFF
:
2886 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
2889 case OPC2_32_BO_LD_D_POSTINC
:
2890 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
2891 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2893 case OPC2_32_BO_LD_D_PREINC
:
2894 temp
= tcg_temp_new();
2895 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2896 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
2897 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
2898 tcg_temp_free(temp
);
2900 case OPC2_32_BO_LD_DA_SHORTOFF
:
2901 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
2904 case OPC2_32_BO_LD_DA_POSTINC
:
2905 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
2906 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2908 case OPC2_32_BO_LD_DA_PREINC
:
2909 temp
= tcg_temp_new();
2910 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2911 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
2912 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
2913 tcg_temp_free(temp
);
2915 case OPC2_32_BO_LD_H_SHORTOFF
:
2916 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
2918 case OPC2_32_BO_LD_H_POSTINC
:
2919 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2921 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2923 case OPC2_32_BO_LD_H_PREINC
:
2924 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
2926 case OPC2_32_BO_LD_HU_SHORTOFF
:
2927 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2929 case OPC2_32_BO_LD_HU_POSTINC
:
2930 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2932 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2934 case OPC2_32_BO_LD_HU_PREINC
:
2935 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2937 case OPC2_32_BO_LD_Q_SHORTOFF
:
/* LD.Q loads a halfword into the upper half of D[r1]. */
2938 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2939 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
2941 case OPC2_32_BO_LD_Q_POSTINC
:
2942 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2944 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
2945 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2947 case OPC2_32_BO_LD_Q_PREINC
:
2948 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2949 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
2951 case OPC2_32_BO_LD_W_SHORTOFF
:
2952 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
2954 case OPC2_32_BO_LD_W_POSTINC
:
2955 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2957 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2959 case OPC2_32_BO_LD_W_PREINC
:
2960 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
/*
 * Decode BO-format load instructions with bit-reverse and circular
 * addressing modes. Mirrors the store variant: effective address is
 * A[r2] + zext16(A[r2+1]); after the access gen_helper_br_update or
 * gen_helper_circ_update (with off10 in temp3) advances A[r2+1].
 * The 64-bit circular cases load the second word at (index + 4) mod
 * buffer length before updating.
 * NOTE(review): extraction dropped structural lines (switch header,
 * breaks, braces); code left byte-identical.
 */
2965 static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState
*env
,
2972 TCGv temp
, temp2
, temp3
;
2974 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
2975 r2
= MASK_OP_BO_S2(ctx
->opcode
);
2976 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
2977 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
2979 temp
= tcg_temp_new();
2980 temp2
= tcg_temp_new();
2981 temp3
= tcg_const_i32(off10
);
/* effective address = A[r2] + zero-extended low half of A[r2+1] */
2983 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
2984 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
2988 case OPC2_32_BO_LD_A_BR
:
2989 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2990 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2992 case OPC2_32_BO_LD_A_CIRC
:
2993 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2994 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2996 case OPC2_32_BO_LD_B_BR
:
2997 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
2998 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3000 case OPC2_32_BO_LD_B_CIRC
:
3001 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
3002 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3004 case OPC2_32_BO_LD_BU_BR
:
3005 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3006 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3008 case OPC2_32_BO_LD_BU_CIRC
:
3009 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3010 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3012 case OPC2_32_BO_LD_D_BR
:
3013 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
3014 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3016 case OPC2_32_BO_LD_D_CIRC
:
/* load low word, then high word at (index + 4) mod buffer length */
3017 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3018 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3019 tcg_gen_addi_tl(temp
, temp
, 4);
3020 tcg_gen_rem_tl(temp
, temp
, temp2
);
3021 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3022 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3023 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3025 case OPC2_32_BO_LD_DA_BR
:
3026 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
3027 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3029 case OPC2_32_BO_LD_DA_CIRC
:
3030 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3031 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3032 tcg_gen_addi_tl(temp
, temp
, 4);
3033 tcg_gen_rem_tl(temp
, temp
, temp2
);
3034 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3035 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3036 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3038 case OPC2_32_BO_LD_H_BR
:
3039 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
3040 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3042 case OPC2_32_BO_LD_H_CIRC
:
3043 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
3044 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3046 case OPC2_32_BO_LD_HU_BR
:
3047 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3048 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3050 case OPC2_32_BO_LD_HU_CIRC
:
3051 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3052 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3054 case OPC2_32_BO_LD_Q_BR
:
/* LD.Q loads a halfword into the upper half of D[r1]. */
3055 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3056 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3057 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3059 case OPC2_32_BO_LD_Q_CIRC
:
3060 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], tem2
->mem_idx
, MO_LEUW
);
3061 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3062 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3064 case OPC2_32_BO_LD_W_BR
:
3065 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3066 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3068 case OPC2_32_BO_LD_W_CIRC
:
3069 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3070 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3073 tcg_temp_free(temp
);
3074 tcg_temp_free(temp2
);
3075 tcg_temp_free(temp3
);
/*
 * Decode BO-format context/load-modify-store instructions (LDLCX, LDUCX,
 * STLCX, STUCX, LDMST, SWAP.W, LEA) with short-offset, post-increment and
 * pre-increment addressing modes. Context ops compute the effective
 * address into a temp and call the corresponding helper; LDMST/SWAP.W are
 * generated via gen_ldmst/gen_swap on register pair r1.
 * NOTE(review): temp2 appears to be allocated and freed without use in the
 * visible code — confirm against the full file before removing.
 * NOTE(review): extraction dropped structural lines (switch header,
 * breaks, braces); code left byte-identical.
 */
3078 static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState
*env
,
3087 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3088 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3089 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3090 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3093 temp
= tcg_temp_new();
3094 temp2
= tcg_temp_new();
3097 case OPC2_32_BO_LDLCX_SHORTOFF
:
3098 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3099 gen_helper_ldlcx(cpu_env
, temp
);
3101 case OPC2_32_BO_LDMST_SHORTOFF
:
3102 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3103 gen_ldmst(ctx
, r1
, temp
);
3105 case OPC2_32_BO_LDMST_POSTINC
:
3106 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
3107 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3109 case OPC2_32_BO_LDMST_PREINC
:
3110 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3111 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
3113 case OPC2_32_BO_LDUCX_SHORTOFF
:
3114 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3115 gen_helper_lducx(cpu_env
, temp
);
3117 case OPC2_32_BO_LEA_SHORTOFF
:
3118 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
3120 case OPC2_32_BO_STLCX_SHORTOFF
:
3121 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3122 gen_helper_stlcx(cpu_env
, temp
);
3124 case OPC2_32_BO_STUCX_SHORTOFF
:
3125 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3126 gen_helper_stucx(cpu_env
, temp
);
3128 case OPC2_32_BO_SWAP_W_SHORTOFF
:
3129 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3130 gen_swap(ctx
, r1
, temp
);
3132 case OPC2_32_BO_SWAP_W_POSTINC
:
3133 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
3134 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3136 case OPC2_32_BO_SWAP_W_PREINC
:
3137 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3138 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
3141 tcg_temp_free(temp
);
3142 tcg_temp_free(temp2
);
/*
 * Decode BO-format LDMST and SWAP.W with bit-reverse and circular
 * addressing modes. Effective address is A[r2] + zext16(A[r2+1]); after
 * the access the index register A[r2+1] is advanced via
 * gen_helper_br_update / gen_helper_circ_update (off10 in temp3).
 * NOTE(review): extraction dropped structural lines (switch header,
 * breaks, braces); code left byte-identical.
 */
3145 static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState
*env
,
3152 TCGv temp
, temp2
, temp3
;
3154 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3155 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3156 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3157 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3159 temp
= tcg_temp_new();
3160 temp2
= tcg_temp_new();
3161 temp3
= tcg_const_i32(off10
);
/* effective address = A[r2] + zero-extended low half of A[r2+1] */
3163 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
3164 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3167 case OPC2_32_BO_LDMST_BR
:
3168 gen_ldmst(ctx
, r1
, temp2
);
3169 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3171 case OPC2_32_BO_LDMST_CIRC
:
3172 gen_ldmst(ctx
, r1
, temp2
);
3173 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3175 case OPC2_32_BO_SWAP_W_BR
:
3176 gen_swap(ctx
, r1
, temp2
);
3177 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3179 case OPC2_32_BO_SWAP_W_CIRC
:
3180 gen_swap(ctx
, r1
, temp2
);
3181 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3184 tcg_temp_free(temp
);
3185 tcg_temp_free(temp2
);
3186 tcg_temp_free(temp3
);
/*
 * Decode BOL-format instructions (16-bit long offset): LD.A, LD.W, LEA,
 * ST.A, ST.W with address = A[r2] + sext16(off16). ST.A is only valid
 * from TriCore 1.6 on; otherwise an illegal opcode trap is still TODO.
 * NOTE(review): extraction dropped structural lines (switch header,
 * breaks, braces); code left byte-identical. The OPC1_32_BOL_LD_W_LONFOFF
 * enumerator name ("LONFOFF") is a typo in tricore-opcodes.h, not fixable
 * here.
 */
3189 static void decode_bol_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int32_t op1
)
3195 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
3196 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
3197 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
3200 case OPC1_32_BOL_LD_A_LONGOFF
:
3201 temp
= tcg_temp_new();
3202 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
3203 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
3204 tcg_temp_free(temp
);
3206 case OPC1_32_BOL_LD_W_LONFOFF
:
3207 temp
= tcg_temp_new();
3208 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
3209 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
3210 tcg_temp_free(temp
);
3212 case OPC1_32_BOL_LEA_LONGOFF
:
3213 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
3215 case OPC1_32_BOL_ST_A_LONGOFF
:
/* ST.A with 16-bit offset exists only from TriCore 1.6 on. */
3216 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3217 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
3219 /* raise illegal opcode trap */
3222 case OPC1_32_BOL_ST_W_LONGOFF
:
3223 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
/*
 * Decode RC-format logical and shift instructions with a 9-bit constant:
 * AND, ANDN, NAND, NOR, OR, ORN, XNOR, XOR and the SH/SHA/SHAS shift
 * family. Immediate-capable TCG ops take const9 directly; the others load
 * it into a temp first. Shift counts are sign-extracted to 6 bits (word
 * shifts) or 5 bits (halfword shifts) before use.
 * NOTE(review): extraction dropped structural lines (switch header,
 * breaks, braces, and apparently the OPC2_32_RC_OR / RC_SH case labels);
 * code left byte-identical.
 */
3230 static void decode_rc_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
3237 r2
= MASK_OP_RC_D(ctx
->opcode
);
3238 r1
= MASK_OP_RC_S1(ctx
->opcode
);
3239 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3240 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
3242 temp
= tcg_temp_new();
3245 case OPC2_32_RC_AND
:
3246 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3248 case OPC2_32_RC_ANDN
:
3249 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
3251 case OPC2_32_RC_NAND
:
/* No immediate form of nand/nor in TCG: materialize const9 in temp. */
3252 tcg_gen_movi_tl(temp
, const9
);
3253 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
3255 case OPC2_32_RC_NOR
:
3256 tcg_gen_movi_tl(temp
, const9
);
3257 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
3260 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3262 case OPC2_32_RC_ORN
:
3263 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
/* SH: signed 6-bit shift count (negative = right shift). */
3266 const9
= sextract32(const9
, 0, 6);
3267 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3269 case OPC2_32_RC_SH_H
:
/* SH.H: signed 5-bit shift count per halfword. */
3270 const9
= sextract32(const9
, 0, 5);
3271 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3273 case OPC2_32_RC_SHA
:
3274 const9
= sextract32(const9
, 0, 6);
3275 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3277 case OPC2_32_RC_SHA_H
:
3278 const9
= sextract32(const9
, 0, 5);
3279 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3281 case OPC2_32_RC_SHAS
:
3282 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3284 case OPC2_32_RC_XNOR
:
3285 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3286 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
3288 case OPC2_32_RC_XOR
:
3289 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3292 tcg_temp_free(temp
);
3295 static void decode_rc_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
3303 r2
= MASK_OP_RC_D(ctx
->opcode
);
3304 r1
= MASK_OP_RC_S1(ctx
->opcode
);
3305 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
3307 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
3309 temp
= tcg_temp_new();
3312 case OPC2_32_RC_ABSDIF
:
3313 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3315 case OPC2_32_RC_ABSDIFS
:
3316 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3318 case OPC2_32_RC_ADD
:
3319 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3321 case OPC2_32_RC_ADDC
:
3322 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3324 case OPC2_32_RC_ADDS
:
3325 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3327 case OPC2_32_RC_ADDS_U
:
3328 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3330 case OPC2_32_RC_ADDX
:
3331 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3333 case OPC2_32_RC_AND_EQ
:
3334 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3335 const9
, &tcg_gen_and_tl
);
3337 case OPC2_32_RC_AND_GE
:
3338 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3339 const9
, &tcg_gen_and_tl
);
3341 case OPC2_32_RC_AND_GE_U
:
3342 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3343 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3344 const9
, &tcg_gen_and_tl
);
3346 case OPC2_32_RC_AND_LT
:
3347 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3348 const9
, &tcg_gen_and_tl
);
3350 case OPC2_32_RC_AND_LT_U
:
3351 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3352 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3353 const9
, &tcg_gen_and_tl
);
3355 case OPC2_32_RC_AND_NE
:
3356 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3357 const9
, &tcg_gen_and_tl
);
3360 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3362 case OPC2_32_RC_EQANY_B
:
3363 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3365 case OPC2_32_RC_EQANY_H
:
3366 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3369 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3371 case OPC2_32_RC_GE_U
:
3372 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3373 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3376 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3378 case OPC2_32_RC_LT_U
:
3379 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3380 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3382 case OPC2_32_RC_MAX
:
3383 tcg_gen_movi_tl(temp
, const9
);
3384 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
3385 cpu_gpr_d
[r1
], temp
);
3387 case OPC2_32_RC_MAX_U
:
3388 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
3389 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
3390 cpu_gpr_d
[r1
], temp
);
3392 case OPC2_32_RC_MIN
:
3393 tcg_gen_movi_tl(temp
, const9
);
3394 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
3395 cpu_gpr_d
[r1
], temp
);
3397 case OPC2_32_RC_MIN_U
:
3398 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
3399 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
3400 cpu_gpr_d
[r1
], temp
);
3403 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3405 case OPC2_32_RC_OR_EQ
:
3406 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3407 const9
, &tcg_gen_or_tl
);
3409 case OPC2_32_RC_OR_GE
:
3410 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3411 const9
, &tcg_gen_or_tl
);
3413 case OPC2_32_RC_OR_GE_U
:
3414 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3415 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3416 const9
, &tcg_gen_or_tl
);
3418 case OPC2_32_RC_OR_LT
:
3419 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3420 const9
, &tcg_gen_or_tl
);
3422 case OPC2_32_RC_OR_LT_U
:
3423 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3424 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3425 const9
, &tcg_gen_or_tl
);
3427 case OPC2_32_RC_OR_NE
:
3428 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3429 const9
, &tcg_gen_or_tl
);
3431 case OPC2_32_RC_RSUB
:
3432 tcg_gen_movi_tl(temp
, const9
);
3433 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
3435 case OPC2_32_RC_RSUBS
:
3436 tcg_gen_movi_tl(temp
, const9
);
3437 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
3439 case OPC2_32_RC_RSUBS_U
:
3440 tcg_gen_movi_tl(temp
, const9
);
3441 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
3443 case OPC2_32_RC_SH_EQ
:
3444 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3446 case OPC2_32_RC_SH_GE
:
3447 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3449 case OPC2_32_RC_SH_GE_U
:
3450 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3451 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3453 case OPC2_32_RC_SH_LT
:
3454 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3456 case OPC2_32_RC_SH_LT_U
:
3457 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3458 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3460 case OPC2_32_RC_SH_NE
:
3461 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3463 case OPC2_32_RC_XOR_EQ
:
3464 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3465 const9
, &tcg_gen_xor_tl
);
3467 case OPC2_32_RC_XOR_GE
:
3468 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3469 const9
, &tcg_gen_xor_tl
);
3471 case OPC2_32_RC_XOR_GE_U
:
3472 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3473 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3474 const9
, &tcg_gen_xor_tl
);
3476 case OPC2_32_RC_XOR_LT
:
3477 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3478 const9
, &tcg_gen_xor_tl
);
3480 case OPC2_32_RC_XOR_LT_U
:
3481 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3482 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3483 const9
, &tcg_gen_xor_tl
);
3485 case OPC2_32_RC_XOR_NE
:
3486 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3487 const9
, &tcg_gen_xor_tl
);
3490 tcg_temp_free(temp
);
3493 static void decode_rc_serviceroutine(CPUTriCoreState
*env
, DisasContext
*ctx
)
3498 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
3499 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3502 case OPC2_32_RC_BISR
:
3503 gen_helper_1arg(bisr
, const9
);
3505 case OPC2_32_RC_SYSCALL
:
3506 /* TODO: Add exception generation */
3511 static void decode_rc_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
3517 r2
= MASK_OP_RC_D(ctx
->opcode
);
3518 r1
= MASK_OP_RC_S1(ctx
->opcode
);
3519 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
3521 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
3524 case OPC2_32_RC_MUL_32
:
3525 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3527 case OPC2_32_RC_MUL_64
:
3528 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
3530 case OPC2_32_RC_MULS_32
:
3531 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3533 case OPC2_32_RC_MUL_U_64
:
3534 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3535 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
3537 case OPC2_32_RC_MULS_U_32
:
3538 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3539 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3545 static void decode_rcpw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
3549 int32_t pos
, width
, const4
;
3553 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
3554 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
3555 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
3556 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
3557 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
3558 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
3561 case OPC2_32_RCPW_IMASK
:
3562 /* if pos + width > 31 undefined result */
3563 if (pos
+ width
<= 31) {
3564 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
3565 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
3568 case OPC2_32_RCPW_INSERT
:
3569 /* if pos + width > 32 undefined result */
3570 if (pos
+ width
<= 32) {
3571 temp
= tcg_const_i32(const4
);
3572 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
3573 tcg_temp_free(temp
);
3581 static void decode_rcrw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
3585 int32_t width
, const4
;
3587 TCGv temp
, temp2
, temp3
;
3589 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
3590 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
3591 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
3592 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
3593 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
3594 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
3596 temp
= tcg_temp_new();
3597 temp2
= tcg_temp_new();
3600 case OPC2_32_RCRW_IMASK
:
3601 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r4
], 0x1f);
3602 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
3603 tcg_gen_shl_tl(cpu_gpr_d
[r3
+ 1], temp2
, temp
);
3604 tcg_gen_movi_tl(temp2
, const4
);
3605 tcg_gen_shl_tl(cpu_gpr_d
[r3
], temp2
, temp
);
3607 case OPC2_32_RCRW_INSERT
:
3608 temp3
= tcg_temp_new();
3610 tcg_gen_movi_tl(temp
, width
);
3611 tcg_gen_movi_tl(temp2
, const4
);
3612 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r4
], 0x1f);
3613 gen_insert(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
3615 tcg_temp_free(temp3
);
3618 tcg_temp_free(temp
);
3619 tcg_temp_free(temp2
);
3624 static void decode_rcr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
3632 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
3633 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
3634 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
3635 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
3636 r4
= MASK_OP_RCR_D(ctx
->opcode
);
3639 case OPC2_32_RCR_CADD
:
3640 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
3643 case OPC2_32_RCR_CADDN
:
3644 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
3647 case OPC2_32_RCR_SEL
:
3648 temp
= tcg_const_i32(0);
3649 temp2
= tcg_const_i32(const9
);
3650 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r4
], temp
,
3651 cpu_gpr_d
[r1
], temp2
);
3652 tcg_temp_free(temp
);
3653 tcg_temp_free(temp2
);
3655 case OPC2_32_RCR_SELN
:
3656 temp
= tcg_const_i32(0);
3657 temp2
= tcg_const_i32(const9
);
3658 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r4
], temp
,
3659 cpu_gpr_d
[r1
], temp2
);
3660 tcg_temp_free(temp
);
3661 tcg_temp_free(temp2
);
3666 static void decode_rcr_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
3673 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
3674 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
3675 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
3676 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
3677 r4
= MASK_OP_RCR_D(ctx
->opcode
);
3680 case OPC2_32_RCR_MADD_32
:
3681 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3683 case OPC2_32_RCR_MADD_64
:
3684 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3685 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3687 case OPC2_32_RCR_MADDS_32
:
3688 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3690 case OPC2_32_RCR_MADDS_64
:
3691 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3692 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3694 case OPC2_32_RCR_MADD_U_64
:
3695 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
3696 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3697 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3699 case OPC2_32_RCR_MADDS_U_32
:
3700 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
3701 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3703 case OPC2_32_RCR_MADDS_U_64
:
3704 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
3705 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3706 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3711 static void decode_rcr_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
3718 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
3719 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
3720 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
3721 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
3722 r4
= MASK_OP_RCR_D(ctx
->opcode
);
3725 case OPC2_32_RCR_MSUB_32
:
3726 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3728 case OPC2_32_RCR_MSUB_64
:
3729 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3730 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3732 case OPC2_32_RCR_MSUBS_32
:
3733 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3735 case OPC2_32_RCR_MSUBS_64
:
3736 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3737 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3739 case OPC2_32_RCR_MSUB_U_64
:
3740 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
3741 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3742 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3744 case OPC2_32_RCR_MSUBS_U_32
:
3745 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
3746 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3748 case OPC2_32_RCR_MSUBS_U_64
:
3749 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
3750 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3751 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3758 static void decode_rlc_opc(CPUTriCoreState
*env
, DisasContext
*ctx
,
3764 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
3765 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
3766 r2
= MASK_OP_RLC_D(ctx
->opcode
);
3769 case OPC1_32_RLC_ADDI
:
3770 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
3772 case OPC1_32_RLC_ADDIH
:
3773 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
3775 case OPC1_32_RLC_ADDIH_A
:
3776 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
3778 case OPC1_32_RLC_MFCR
:
3779 gen_mfcr(env
, cpu_gpr_d
[r2
], const16
);
3781 case OPC1_32_RLC_MOV
:
3782 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
3784 case OPC1_32_RLC_MOV_U
:
3785 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
3786 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
3788 case OPC1_32_RLC_MOV_H
:
3789 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
3791 case OPC1_32_RLC_MOVH_A
:
3792 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
3794 case OPC1_32_RLC_MTCR
:
3795 gen_mtcr(env
, ctx
, cpu_gpr_d
[r2
], const16
);
3800 static void decode_32Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
3804 int32_t address
, const16
;
3807 TCGv temp
, temp2
, temp3
;
3809 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3811 /* handle JNZ.T opcode only being 6 bit long */
3812 if (unlikely((op1
& 0x3f) == OPCM_32_BRN_JTT
)) {
3813 op1
= OPCM_32_BRN_JTT
;
3818 case OPCM_32_ABS_LDW
:
3819 decode_abs_ldw(env
, ctx
);
3821 case OPCM_32_ABS_LDB
:
3822 decode_abs_ldb(env
, ctx
);
3824 case OPCM_32_ABS_LDMST_SWAP
:
3825 decode_abs_ldst_swap(env
, ctx
);
3827 case OPCM_32_ABS_LDST_CONTEXT
:
3828 decode_abs_ldst_context(env
, ctx
);
3830 case OPCM_32_ABS_STORE
:
3831 decode_abs_store(env
, ctx
);
3833 case OPCM_32_ABS_STOREB_H
:
3834 decode_abs_storeb_h(env
, ctx
);
3836 case OPC1_32_ABS_STOREQ
:
3837 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3838 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3839 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3840 temp2
= tcg_temp_new();
3842 tcg_gen_shri_tl(temp2
, cpu_gpr_d
[r1
], 16);
3843 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_LEUW
);
3845 tcg_temp_free(temp2
);
3846 tcg_temp_free(temp
);
3848 case OPC1_32_ABS_LD_Q
:
3849 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3850 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3851 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3853 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3854 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3856 tcg_temp_free(temp
);
3858 case OPC1_32_ABS_LEA
:
3859 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3860 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3861 tcg_gen_movi_tl(cpu_gpr_a
[r1
], EA_ABS_FORMAT(address
));
3864 case OPC1_32_ABSB_ST_T
:
3865 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3866 b
= MASK_OP_ABSB_B(ctx
->opcode
);
3867 bpos
= MASK_OP_ABSB_BPOS(ctx
->opcode
);
3869 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3870 temp2
= tcg_temp_new();
3872 tcg_gen_qemu_ld_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
3873 tcg_gen_andi_tl(temp2
, temp2
, ~(0x1u
<< bpos
));
3874 tcg_gen_ori_tl(temp2
, temp2
, (b
<< bpos
));
3875 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
3877 tcg_temp_free(temp
);
3878 tcg_temp_free(temp2
);
3881 case OPC1_32_B_CALL
:
3882 case OPC1_32_B_CALLA
:
3887 address
= MASK_OP_B_DISP24(ctx
->opcode
);
3888 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
3891 case OPCM_32_BIT_ANDACC
:
3892 decode_bit_andacc(env
, ctx
);
3894 case OPCM_32_BIT_LOGICAL_T1
:
3895 decode_bit_logical_t(env
, ctx
);
3897 case OPCM_32_BIT_INSERT
:
3898 decode_bit_insert(env
, ctx
);
3900 case OPCM_32_BIT_LOGICAL_T2
:
3901 decode_bit_logical_t2(env
, ctx
);
3903 case OPCM_32_BIT_ORAND
:
3904 decode_bit_orand(env
, ctx
);
3906 case OPCM_32_BIT_SH_LOGIC1
:
3907 decode_bit_sh_logic1(env
, ctx
);
3909 case OPCM_32_BIT_SH_LOGIC2
:
3910 decode_bit_sh_logic2(env
, ctx
);
3913 case OPCM_32_BO_ADDRMODE_POST_PRE_BASE
:
3914 decode_bo_addrmode_post_pre_base(env
, ctx
);
3916 case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR
:
3917 decode_bo_addrmode_bitreverse_circular(env
, ctx
);
3919 case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE
:
3920 decode_bo_addrmode_ld_post_pre_base(env
, ctx
);
3922 case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR
:
3923 decode_bo_addrmode_ld_bitreverse_circular(env
, ctx
);
3925 case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE
:
3926 decode_bo_addrmode_stctx_post_pre_base(env
, ctx
);
3928 case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR
:
3929 decode_bo_addrmode_ldmst_bitreverse_circular(env
, ctx
);
3932 case OPC1_32_BOL_LD_A_LONGOFF
:
3933 case OPC1_32_BOL_LD_W_LONFOFF
:
3934 case OPC1_32_BOL_LEA_LONGOFF
:
3935 case OPC1_32_BOL_ST_W_LONGOFF
:
3936 case OPC1_32_BOL_ST_A_LONGOFF
:
3937 decode_bol_opc(env
, ctx
, op1
);
3940 case OPCM_32_BRC_EQ_NEQ
:
3941 case OPCM_32_BRC_GE
:
3942 case OPCM_32_BRC_JLT
:
3943 case OPCM_32_BRC_JNE
:
3944 const4
= MASK_OP_BRC_CONST4_SEXT(ctx
->opcode
);
3945 address
= MASK_OP_BRC_DISP15_SEXT(ctx
->opcode
);
3946 r1
= MASK_OP_BRC_S1(ctx
->opcode
);
3947 gen_compute_branch(ctx
, op1
, r1
, 0, const4
, address
);
3950 case OPCM_32_BRN_JTT
:
3951 address
= MASK_OP_BRN_DISP15_SEXT(ctx
->opcode
);
3952 r1
= MASK_OP_BRN_S1(ctx
->opcode
);
3953 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3956 case OPCM_32_BRR_EQ_NEQ
:
3957 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3958 case OPCM_32_BRR_GE
:
3959 case OPCM_32_BRR_JLT
:
3960 case OPCM_32_BRR_JNE
:
3961 case OPCM_32_BRR_JNZ
:
3962 case OPCM_32_BRR_LOOP
:
3963 address
= MASK_OP_BRR_DISP15_SEXT(ctx
->opcode
);
3964 r2
= MASK_OP_BRR_S2(ctx
->opcode
);
3965 r1
= MASK_OP_BRR_S1(ctx
->opcode
);
3966 gen_compute_branch(ctx
, op1
, r1
, r2
, 0, address
);
3969 case OPCM_32_RC_LOGICAL_SHIFT
:
3970 decode_rc_logical_shift(env
, ctx
);
3972 case OPCM_32_RC_ACCUMULATOR
:
3973 decode_rc_accumulator(env
, ctx
);
3975 case OPCM_32_RC_SERVICEROUTINE
:
3976 decode_rc_serviceroutine(env
, ctx
);
3978 case OPCM_32_RC_MUL
:
3979 decode_rc_mul(env
, ctx
);
3982 case OPCM_32_RCPW_MASK_INSERT
:
3983 decode_rcpw_insert(env
, ctx
);
3986 case OPC1_32_RCRR_INSERT
:
3987 r1
= MASK_OP_RCRR_S1(ctx
->opcode
);
3988 r2
= MASK_OP_RCRR_S3(ctx
->opcode
);
3989 r3
= MASK_OP_RCRR_D(ctx
->opcode
);
3990 const16
= MASK_OP_RCRR_CONST4(ctx
->opcode
);
3991 temp
= tcg_const_i32(const16
);
3992 temp2
= tcg_temp_new(); /* width*/
3993 temp3
= tcg_temp_new(); /* pos */
3995 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
+1], 0x1f);
3996 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
3998 gen_insert(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, temp2
, temp3
);
4000 tcg_temp_free(temp
);
4001 tcg_temp_free(temp2
);
4002 tcg_temp_free(temp3
);
4005 case OPCM_32_RCRW_MASK_INSERT
:
4006 decode_rcrw_insert(env
, ctx
);
4009 case OPCM_32_RCR_COND_SELECT
:
4010 decode_rcr_cond_select(env
, ctx
);
4012 case OPCM_32_RCR_MADD
:
4013 decode_rcr_madd(env
, ctx
);
4015 case OPCM_32_RCR_MSUB
:
4016 decode_rcr_msub(env
, ctx
);
4019 case OPC1_32_RLC_ADDI
:
4020 case OPC1_32_RLC_ADDIH
:
4021 case OPC1_32_RLC_ADDIH_A
:
4022 case OPC1_32_RLC_MFCR
:
4023 case OPC1_32_RLC_MOV
:
4024 case OPC1_32_RLC_MOV_U
:
4025 case OPC1_32_RLC_MOV_H
:
4026 case OPC1_32_RLC_MOVH_A
:
4027 case OPC1_32_RLC_MTCR
:
4028 decode_rlc_opc(env
, ctx
, op1
);
4033 static void decode_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int *is_branch
)
4035 /* 16-Bit Instruction */
4036 if ((ctx
->opcode
& 0x1) == 0) {
4037 ctx
->next_pc
= ctx
->pc
+ 2;
4038 decode_16Bit_opc(env
, ctx
);
4039 /* 32-Bit Instruction */
4041 ctx
->next_pc
= ctx
->pc
+ 4;
4042 decode_32Bit_opc(env
, ctx
);
/*
 * gen_intermediate_code_internal: translate one guest translation block
 * into TCG ops.
 * NOTE(review): this extraction is fragmentary -- several original source
 * lines (DisasContext/num_insns declarations, the search_pc guards, TB
 * start/exit handling and most braces) are not visible here.  Comments
 * annotate only the statements that are visible; confirm the full control
 * flow against the complete source file.
 */
4047 gen_intermediate_code_internal(TriCoreCPU
*cpu
, struct TranslationBlock
*tb
,
/* Resolve the generic CPUState and the TriCore register file views. */
4050 CPUState
*cs
= CPU(cpu
);
4051 CPUTriCoreState
*env
= &cpu
->env
;
4053 target_ulong pc_start
;
4055 uint16_t *gen_opc_end
;
/* Debug trace (presumably gated on search_pc -- guard line not visible). */
4058 qemu_log("search pc %d\n", search_pc
);
/* End of the TCG opcode buffer; checked below to stop before overflow. */
4063 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
/* Seed the DisasContext from CPU state. */
4067 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
4068 ctx
.bstate
= BS_NONE
;
4069 ctx
.mem_idx
= cpu_mmu_index(env
);
/* Track TCG temporaries so leaks can be reported after translation. */
4071 tcg_clear_temp_count();
/* Main loop: translate until a branch/exception changes bstate. */
4073 while (ctx
.bstate
== BS_NONE
) {
4074 ctx
.opcode
= cpu_ldl_code(env
, ctx
.pc
);
4075 decode_opc(env
, &ctx
, 0);
/* Stop translating when the opcode buffer is (nearly) full. */
4079 if (tcg_ctx
.gen_opc_ptr
>= gen_opc_end
) {
4080 gen_save_pc(ctx
.next_pc
);
/* Second gen_save_pc: from a separate exit path (lines between are
   missing -- likely a singlestep check; confirm). */
4085 gen_save_pc(ctx
.next_pc
);
/* Advance to the next sequential instruction. */
4089 ctx
.pc
= ctx
.next_pc
;
/* Finalize the TB and terminate the opcode stream. */
4092 gen_tb_end(tb
, num_insns
);
4093 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
4095 printf("done_generating search pc\n");
/* Record translated size and instruction count on the TB. */
4097 tb
->size
= ctx
.pc
- pc_start
;
4098 tb
->icount
= num_insns
;
/* Report any TCG temporaries left allocated by the decoders. */
4100 if (tcg_check_temp_count()) {
4101 printf("LEAK at %08x\n", env
->PC
);
/* Optional in-asm disassembly dump of the translated guest code. */
4105 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
4106 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
4107 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
4114 gen_intermediate_code(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
4116 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, false);
4120 gen_intermediate_code_pc(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
4122 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, true);
4126 restore_state_to_opc(CPUTriCoreState
*env
, TranslationBlock
*tb
, int pc_pos
)
4128 env
->PC
= tcg_ctx
.gen_opc_pc
[pc_pos
];
/*
 * cpu_state_reset: reset TriCore CPU registers to architectural defaults.
 * NOTE(review): the function body is not visible in this extraction apart
 * from the comment below -- confirm the actual reset values against the
 * complete source file.
 */
4136 void cpu_state_reset(CPUTriCoreState
*env
)
4138 /* Reset Regs to Default Value */
4142 static void tricore_tcg_init_csfr(void)
4144 cpu_PCXI
= tcg_global_mem_new(TCG_AREG0
,
4145 offsetof(CPUTriCoreState
, PCXI
), "PCXI");
4146 cpu_PSW
= tcg_global_mem_new(TCG_AREG0
,
4147 offsetof(CPUTriCoreState
, PSW
), "PSW");
4148 cpu_PC
= tcg_global_mem_new(TCG_AREG0
,
4149 offsetof(CPUTriCoreState
, PC
), "PC");
4150 cpu_ICR
= tcg_global_mem_new(TCG_AREG0
,
4151 offsetof(CPUTriCoreState
, ICR
), "ICR");
4154 void tricore_tcg_init(void)
4161 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
4163 for (i
= 0 ; i
< 16 ; i
++) {
4164 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
4165 offsetof(CPUTriCoreState
, gpr_a
[i
]),
4168 for (i
= 0 ; i
< 16 ; i
++) {
4169 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
4170 offsetof(CPUTriCoreState
, gpr_d
[i
]),
4173 tricore_tcg_init_csfr();
4174 /* init PSW flag cache */
4175 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
4176 offsetof(CPUTriCoreState
, PSW_USB_C
),
4178 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
4179 offsetof(CPUTriCoreState
, PSW_USB_V
),
4181 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
4182 offsetof(CPUTriCoreState
, PSW_USB_SV
),
4184 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
4185 offsetof(CPUTriCoreState
, PSW_USB_AV
),
4187 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
4188 offsetof(CPUTriCoreState
, PSW_USB_SAV
),