/*
 * TriCore emulation for qemu: main translation routines.
 *
 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
/* TCG global variables mapping the TriCore architectural state.
   The PSW status flags (C/V/SV/AV/SAV) are kept in separate globals so the
   generated code can update them individually ("caching PSW", see
   gen_mfcr/gen_mtcr below). */
static TCGv cpu_gpr_a[16];   /* address registers A[0..15] */
static TCGv cpu_gpr_d[16];   /* data registers D[0..15] */
static TCGv cpu_PSW_C;       /* PSW carry flag */
static TCGv cpu_PSW_V;       /* PSW overflow flag */
static TCGv cpu_PSW_SV;      /* PSW sticky overflow flag */
static TCGv cpu_PSW_AV;      /* PSW advanced overflow flag */
static TCGv cpu_PSW_SAV;     /* PSW sticky advanced overflow flag */
/* pointer to the CPUTriCoreState structure passed to helpers */
static TCGv_ptr cpu_env;
50 #include "exec/gen-icount.h"
/* Register names used when registering the TCG globals; note A10 is
   displayed as "sp" (stack pointer) per TriCore convention. */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp"  , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };

static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
64 typedef struct DisasContext
{
65 struct TranslationBlock
*tb
;
66 target_ulong pc
, saved_pc
, next_pc
;
68 int singlestep_enabled
;
69 /* Routine used to access memory */
71 uint32_t hflags
, saved_hflags
;
83 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
84 fprintf_function cpu_fprintf
, int flags
)
86 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
87 CPUTriCoreState
*env
= &cpu
->env
;
93 cpu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
94 cpu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
95 cpu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
96 cpu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
97 cpu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
98 cpu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
100 for (i
= 0; i
< 16; ++i
) {
102 cpu_fprintf(f
, "\nGPR A%02d:", i
);
104 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
106 for (i
= 0; i
< 16; ++i
) {
108 cpu_fprintf(f
, "\nGPR D%02d:", i
);
110 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
112 cpu_fprintf(f
, "\n");
/*
 * Functions to generate micro-ops
 */

/* Macros for generating helpers */
/* Call a one-argument helper, wrapping the immediate in a temporary. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)

/* The GEN_HELPER_xy macros split two 32-bit operands into sign-extended
   16-bit halves and call the helper with the half-word combination selected
   by the suffix (L = low half, U = upper half of the respective operand). */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)

#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)

#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)

#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)

/* Effective-address composition for absolute addressing modes. */
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
189 /* Functions for load/save to/from memory */
191 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
192 int16_t con
, TCGMemOp mop
)
194 TCGv temp
= tcg_temp_new();
195 tcg_gen_addi_tl(temp
, r2
, con
);
196 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
200 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
201 int16_t con
, TCGMemOp mop
)
203 TCGv temp
= tcg_temp_new();
204 tcg_gen_addi_tl(temp
, r2
, con
);
205 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
209 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
211 TCGv_i64 temp
= tcg_temp_new_i64();
213 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
214 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
216 tcg_temp_free_i64(temp
);
219 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
222 TCGv temp
= tcg_temp_new();
223 tcg_gen_addi_tl(temp
, base
, con
);
224 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
228 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
230 TCGv_i64 temp
= tcg_temp_new_i64();
232 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
233 /* write back to two 32 bit regs */
234 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
236 tcg_temp_free_i64(temp
);
239 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
242 TCGv temp
= tcg_temp_new();
243 tcg_gen_addi_tl(temp
, base
, con
);
244 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
248 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
251 TCGv temp
= tcg_temp_new();
252 tcg_gen_addi_tl(temp
, r2
, off
);
253 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
254 tcg_gen_mov_tl(r2
, temp
);
258 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
261 TCGv temp
= tcg_temp_new();
262 tcg_gen_addi_tl(temp
, r2
, off
);
263 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
264 tcg_gen_mov_tl(r2
, temp
);
268 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
269 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
271 TCGv temp
= tcg_temp_new();
272 TCGv temp2
= tcg_temp_new();
274 /* temp = (M(EA, word) */
275 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
276 /* temp = temp & ~E[a][63:32]) */
277 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
278 /* temp2 = (E[a][31:0] & E[a][63:32]); */
279 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
280 /* temp = temp | temp2; */
281 tcg_gen_or_tl(temp
, temp
, temp2
);
282 /* M(EA, word) = temp; */
283 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
286 tcg_temp_free(temp2
);
/* tmp = M(EA, word);
   M(EA, word) = D[a];
   D[a] = tmp[31:0]; */
292 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
294 TCGv temp
= tcg_temp_new();
296 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
297 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
298 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
/* We generate loads and store to core special function register (csfr) through
   the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
   macros R, A and E, which allow read-only, all and endinit protected access.
   These macros also specify in which ISA version the csfr was introduced. */
307 #define R(ADDRESS, REG, FEATURE) \
309 if (tricore_feature(env, FEATURE)) { \
310 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
313 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
314 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
315 static inline void gen_mfcr(CPUTriCoreState
*env
, TCGv ret
, int32_t offset
)
317 /* since we're caching PSW make this a special case */
318 if (offset
== 0xfe04) {
319 gen_helper_psw_read(ret
, cpu_env
);
330 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
331 since no execption occurs */
332 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
334 if (tricore_feature(env, FEATURE)) { \
335 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
338 /* Endinit protected registers
339 TODO: Since the endinit bit is in a register of a not yet implemented
340 watchdog device, we handle endinit protected registers like
341 all-access registers for now. */
342 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
343 static inline void gen_mtcr(CPUTriCoreState
*env
, DisasContext
*ctx
, TCGv r1
,
346 if (ctx
->hflags
& TRICORE_HFLAG_SM
) {
347 /* since we're caching PSW make this a special case */
348 if (offset
== 0xfe04) {
349 gen_helper_psw_write(cpu_env
, r1
);
356 /* generate privilege trap */
360 /* Functions for arithmetic instructions */
362 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
364 TCGv t0
= tcg_temp_new_i32();
365 TCGv result
= tcg_temp_new_i32();
366 /* Addition and set V/SV bits */
367 tcg_gen_add_tl(result
, r1
, r2
);
369 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
370 tcg_gen_xor_tl(t0
, r1
, r2
);
371 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
373 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
374 /* Calc AV/SAV bits */
375 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
376 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
378 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
379 /* write back result */
380 tcg_gen_mov_tl(ret
, result
);
382 tcg_temp_free(result
);
386 /* ret = r2 + (r1 * r3); */
387 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
389 TCGv_i64 t1
= tcg_temp_new_i64();
390 TCGv_i64 t2
= tcg_temp_new_i64();
391 TCGv_i64 t3
= tcg_temp_new_i64();
393 tcg_gen_ext_i32_i64(t1
, r1
);
394 tcg_gen_ext_i32_i64(t2
, r2
);
395 tcg_gen_ext_i32_i64(t3
, r3
);
397 tcg_gen_mul_i64(t1
, t1
, t3
);
398 tcg_gen_add_i64(t1
, t2
, t1
);
400 tcg_gen_trunc_i64_i32(ret
, t1
);
403 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
404 /* t1 < -0x80000000 */
405 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
406 tcg_gen_or_i64(t2
, t2
, t3
);
407 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
408 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
410 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
411 /* Calc AV/SAV bits */
412 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
413 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
415 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
417 tcg_temp_free_i64(t1
);
418 tcg_temp_free_i64(t2
);
419 tcg_temp_free_i64(t3
);
422 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
424 TCGv temp
= tcg_const_i32(con
);
425 gen_madd32_d(ret
, r1
, r2
, temp
);
430 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
433 TCGv t1
= tcg_temp_new();
434 TCGv t2
= tcg_temp_new();
435 TCGv t3
= tcg_temp_new();
436 TCGv t4
= tcg_temp_new();
438 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
439 /* only the add can overflow */
440 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
442 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
443 tcg_gen_xor_tl(t1
, r2_high
, t2
);
444 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
446 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
447 /* Calc AV/SAV bits */
448 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
449 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
451 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
452 /* write back the result */
453 tcg_gen_mov_tl(ret_low
, t3
);
454 tcg_gen_mov_tl(ret_high
, t4
);
463 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
466 TCGv_i64 t1
= tcg_temp_new_i64();
467 TCGv_i64 t2
= tcg_temp_new_i64();
468 TCGv_i64 t3
= tcg_temp_new_i64();
470 tcg_gen_extu_i32_i64(t1
, r1
);
471 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
472 tcg_gen_extu_i32_i64(t3
, r3
);
474 tcg_gen_mul_i64(t1
, t1
, t3
);
475 tcg_gen_add_i64(t2
, t2
, t1
);
476 /* write back result */
477 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
478 /* only the add overflows, if t2 < t1
480 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
481 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
482 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
484 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
485 /* Calc AV/SAV bits */
486 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
487 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
489 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
491 tcg_temp_free_i64(t1
);
492 tcg_temp_free_i64(t2
);
493 tcg_temp_free_i64(t3
);
497 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
500 TCGv temp
= tcg_const_i32(con
);
501 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
506 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
509 TCGv temp
= tcg_const_i32(con
);
510 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
514 /* ret = r2 - (r1 * r3); */
515 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
517 TCGv_i64 t1
= tcg_temp_new_i64();
518 TCGv_i64 t2
= tcg_temp_new_i64();
519 TCGv_i64 t3
= tcg_temp_new_i64();
521 tcg_gen_ext_i32_i64(t1
, r1
);
522 tcg_gen_ext_i32_i64(t2
, r2
);
523 tcg_gen_ext_i32_i64(t3
, r3
);
525 tcg_gen_mul_i64(t1
, t1
, t3
);
526 tcg_gen_sub_i64(t1
, t2
, t1
);
528 tcg_gen_trunc_i64_i32(ret
, t1
);
531 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
532 /* result < -0x80000000 */
533 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
534 tcg_gen_or_i64(t2
, t2
, t3
);
535 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
536 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
539 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
540 /* Calc AV/SAV bits */
541 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
542 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
544 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
546 tcg_temp_free_i64(t1
);
547 tcg_temp_free_i64(t2
);
548 tcg_temp_free_i64(t3
);
551 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
553 TCGv temp
= tcg_const_i32(con
);
554 gen_msub32_d(ret
, r1
, r2
, temp
);
559 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
562 TCGv t1
= tcg_temp_new();
563 TCGv t2
= tcg_temp_new();
564 TCGv t3
= tcg_temp_new();
565 TCGv t4
= tcg_temp_new();
567 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
568 /* only the sub can overflow */
569 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
571 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
572 tcg_gen_xor_tl(t1
, r2_high
, t2
);
573 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
575 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
576 /* Calc AV/SAV bits */
577 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
578 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
580 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
581 /* write back the result */
582 tcg_gen_mov_tl(ret_low
, t3
);
583 tcg_gen_mov_tl(ret_high
, t4
);
592 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
595 TCGv temp
= tcg_const_i32(con
);
596 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
601 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
604 TCGv_i64 t1
= tcg_temp_new_i64();
605 TCGv_i64 t2
= tcg_temp_new_i64();
606 TCGv_i64 t3
= tcg_temp_new_i64();
608 tcg_gen_extu_i32_i64(t1
, r1
);
609 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
610 tcg_gen_extu_i32_i64(t3
, r3
);
612 tcg_gen_mul_i64(t1
, t1
, t3
);
613 tcg_gen_sub_i64(t3
, t2
, t1
);
614 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
615 /* calc V bit, only the sub can overflow, if t1 > t2 */
616 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
617 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
618 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
620 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
621 /* Calc AV/SAV bits */
622 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
623 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
625 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
627 tcg_temp_free_i64(t1
);
628 tcg_temp_free_i64(t2
);
629 tcg_temp_free_i64(t3
);
633 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
636 TCGv temp
= tcg_const_i32(con
);
637 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
641 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
643 TCGv temp
= tcg_const_i32(r2
);
644 gen_add_d(ret
, r1
, temp
);
647 /* calculate the carry bit too */
648 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
650 TCGv t0
= tcg_temp_new_i32();
651 TCGv result
= tcg_temp_new_i32();
653 tcg_gen_movi_tl(t0
, 0);
654 /* Addition and set C/V/SV bits */
655 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
657 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
658 tcg_gen_xor_tl(t0
, r1
, r2
);
659 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
661 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
662 /* Calc AV/SAV bits */
663 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
664 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
666 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
667 /* write back result */
668 tcg_gen_mov_tl(ret
, result
);
670 tcg_temp_free(result
);
674 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
676 TCGv temp
= tcg_const_i32(con
);
677 gen_add_CC(ret
, r1
, temp
);
681 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
683 TCGv carry
= tcg_temp_new_i32();
684 TCGv t0
= tcg_temp_new_i32();
685 TCGv result
= tcg_temp_new_i32();
687 tcg_gen_movi_tl(t0
, 0);
688 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
689 /* Addition, carry and set C/V/SV bits */
690 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
691 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
693 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
694 tcg_gen_xor_tl(t0
, r1
, r2
);
695 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
697 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
698 /* Calc AV/SAV bits */
699 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
700 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
702 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
703 /* write back result */
704 tcg_gen_mov_tl(ret
, result
);
706 tcg_temp_free(result
);
708 tcg_temp_free(carry
);
711 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
713 TCGv temp
= tcg_const_i32(con
);
714 gen_addc_CC(ret
, r1
, temp
);
718 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
721 TCGv temp
= tcg_temp_new();
722 TCGv temp2
= tcg_temp_new();
723 TCGv result
= tcg_temp_new();
724 TCGv mask
= tcg_temp_new();
725 TCGv t0
= tcg_const_i32(0);
727 /* create mask for sticky bits */
728 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
729 tcg_gen_shli_tl(mask
, mask
, 31);
731 tcg_gen_add_tl(result
, r1
, r2
);
733 tcg_gen_xor_tl(temp
, result
, r1
);
734 tcg_gen_xor_tl(temp2
, r1
, r2
);
735 tcg_gen_andc_tl(temp
, temp
, temp2
);
736 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
738 tcg_gen_and_tl(temp
, temp
, mask
);
739 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
741 tcg_gen_add_tl(temp
, result
, result
);
742 tcg_gen_xor_tl(temp
, temp
, result
);
743 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
745 tcg_gen_and_tl(temp
, temp
, mask
);
746 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
747 /* write back result */
748 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r3
);
752 tcg_temp_free(temp2
);
753 tcg_temp_free(result
);
757 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
760 TCGv temp
= tcg_const_i32(r2
);
761 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
765 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
767 TCGv temp
= tcg_temp_new_i32();
768 TCGv result
= tcg_temp_new_i32();
770 tcg_gen_sub_tl(result
, r1
, r2
);
772 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
773 tcg_gen_xor_tl(temp
, r1
, r2
);
774 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
776 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
778 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
779 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
781 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
782 /* write back result */
783 tcg_gen_mov_tl(ret
, result
);
786 tcg_temp_free(result
);
789 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
791 TCGv result
= tcg_temp_new();
792 TCGv temp
= tcg_temp_new();
794 tcg_gen_sub_tl(result
, r1
, r2
);
796 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
798 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
799 tcg_gen_xor_tl(temp
, r1
, r2
);
800 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
802 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
804 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
805 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
807 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
808 /* write back result */
809 tcg_gen_mov_tl(ret
, result
);
811 tcg_temp_free(result
);
815 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
817 TCGv temp
= tcg_temp_new();
818 tcg_gen_not_tl(temp
, r2
);
819 gen_addc_CC(ret
, r1
, temp
);
823 static inline void gen_abs(TCGv ret
, TCGv r1
)
825 TCGv temp
= tcg_temp_new();
826 TCGv t0
= tcg_const_i32(0);
828 tcg_gen_neg_tl(temp
, r1
);
829 tcg_gen_movcond_tl(TCG_COND_GE
, ret
, r1
, t0
, r1
, temp
);
830 /* overflow can only happen, if r1 = 0x80000000 */
831 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
832 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
834 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
836 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
837 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
839 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
845 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
847 TCGv temp
= tcg_temp_new_i32();
848 TCGv result
= tcg_temp_new_i32();
850 tcg_gen_sub_tl(result
, r1
, r2
);
851 tcg_gen_sub_tl(temp
, r2
, r1
);
852 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
855 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
856 tcg_gen_xor_tl(temp
, result
, r2
);
857 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
858 tcg_gen_xor_tl(temp
, r1
, r2
);
859 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
861 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
863 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
864 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
866 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
867 /* write back result */
868 tcg_gen_mov_tl(ret
, result
);
871 tcg_temp_free(result
);
874 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
876 TCGv temp
= tcg_const_i32(con
);
877 gen_absdif(ret
, r1
, temp
);
881 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
883 TCGv temp
= tcg_const_i32(con
);
884 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
888 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
890 TCGv high
= tcg_temp_new();
891 TCGv low
= tcg_temp_new();
893 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
894 tcg_gen_mov_tl(ret
, low
);
896 tcg_gen_sari_tl(low
, low
, 31);
897 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
898 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
900 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
902 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
903 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
905 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
911 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
913 TCGv temp
= tcg_const_i32(con
);
914 gen_mul_i32s(ret
, r1
, temp
);
918 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
920 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
922 tcg_gen_movi_tl(cpu_PSW_V
, 0);
924 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
926 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
927 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
929 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
932 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
935 TCGv temp
= tcg_const_i32(con
);
936 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
940 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
942 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
944 tcg_gen_movi_tl(cpu_PSW_V
, 0);
946 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
948 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
949 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
951 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
954 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
957 TCGv temp
= tcg_const_i32(con
);
958 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
962 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
964 TCGv temp
= tcg_const_i32(con
);
965 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
969 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
971 TCGv temp
= tcg_const_i32(con
);
972 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
975 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
976 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
978 TCGv temp
= tcg_const_i32(con
);
979 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
983 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
985 TCGv temp
= tcg_const_i32(con
);
986 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
991 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
994 TCGv temp
= tcg_const_i32(con
);
995 TCGv_i64 temp64
= tcg_temp_new_i64();
996 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
997 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, temp
);
998 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1000 tcg_temp_free_i64(temp64
);
1004 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1007 TCGv temp
= tcg_const_i32(con
);
1008 TCGv_i64 temp64
= tcg_temp_new_i64();
1009 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
1010 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, temp
);
1011 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1012 tcg_temp_free(temp
);
1013 tcg_temp_free_i64(temp64
);
1016 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1018 TCGv temp
= tcg_const_i32(con
);
1019 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
1020 tcg_temp_free(temp
);
1023 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1025 TCGv temp
= tcg_const_i32(con
);
1026 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
1027 tcg_temp_free(temp
);
1031 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1034 TCGv temp
= tcg_const_i32(con
);
1035 TCGv_i64 temp64
= tcg_temp_new_i64();
1036 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
1037 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, temp
);
1038 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1039 tcg_temp_free(temp
);
1040 tcg_temp_free_i64(temp64
);
1044 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1047 TCGv temp
= tcg_const_i32(con
);
1048 TCGv_i64 temp64
= tcg_temp_new_i64();
1049 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
1050 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, temp
);
1051 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1052 tcg_temp_free(temp
);
1053 tcg_temp_free_i64(temp64
);
1056 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
1058 TCGv sat_neg
= tcg_const_i32(low
);
1059 TCGv temp
= tcg_const_i32(up
);
1061 /* sat_neg = (arg < low ) ? low : arg; */
1062 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
1064 /* ret = (sat_neg > up ) ? up : sat_neg; */
1065 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
1067 tcg_temp_free(sat_neg
);
1068 tcg_temp_free(temp
);
1071 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
1073 TCGv temp
= tcg_const_i32(up
);
1074 /* sat_neg = (arg > up ) ? up : arg; */
1075 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
1076 tcg_temp_free(temp
);
1079 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
1081 if (shift_count
== -32) {
1082 tcg_gen_movi_tl(ret
, 0);
1083 } else if (shift_count
>= 0) {
1084 tcg_gen_shli_tl(ret
, r1
, shift_count
);
1086 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
1090 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
1092 TCGv temp_low
, temp_high
;
1094 if (shiftcount
== -16) {
1095 tcg_gen_movi_tl(ret
, 0);
1097 temp_high
= tcg_temp_new();
1098 temp_low
= tcg_temp_new();
1100 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
1101 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
1102 gen_shi(temp_low
, temp_low
, shiftcount
);
1103 gen_shi(ret
, temp_high
, shiftcount
);
1104 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
1106 tcg_temp_free(temp_low
);
1107 tcg_temp_free(temp_high
);
1111 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
1113 uint32_t msk
, msk_start
;
1114 TCGv temp
= tcg_temp_new();
1115 TCGv temp2
= tcg_temp_new();
1116 TCGv t_0
= tcg_const_i32(0);
1118 if (shift_count
== 0) {
1119 /* Clear PSW.C and PSW.V */
1120 tcg_gen_movi_tl(cpu_PSW_C
, 0);
1121 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
1122 tcg_gen_mov_tl(ret
, r1
);
1123 } else if (shift_count
== -32) {
1125 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
1126 /* fill ret completly with sign bit */
1127 tcg_gen_sari_tl(ret
, r1
, 31);
1129 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1130 } else if (shift_count
> 0) {
1131 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
1132 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
1135 msk_start
= 32 - shift_count
;
1136 msk
= ((1 << shift_count
) - 1) << msk_start
;
1137 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
1138 /* calc v/sv bits */
1139 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
1140 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
1141 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
1142 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1144 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
1146 tcg_gen_shli_tl(ret
, r1
, shift_count
);
1148 tcg_temp_free(t_max
);
1149 tcg_temp_free(t_min
);
1152 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1154 msk
= (1 << -shift_count
) - 1;
1155 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
1157 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
1159 /* calc av overflow bit */
1160 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1161 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1162 /* calc sav overflow bit */
1163 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1165 tcg_temp_free(temp
);
1166 tcg_temp_free(temp2
);
1170 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
1172 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
1175 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
1177 TCGv temp
= tcg_const_i32(con
);
1178 gen_shas(ret
, r1
, temp
);
1179 tcg_temp_free(temp
);
1182 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
1186 if (shift_count
== 0) {
1187 tcg_gen_mov_tl(ret
, r1
);
1188 } else if (shift_count
> 0) {
1189 low
= tcg_temp_new();
1190 high
= tcg_temp_new();
1192 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
1193 tcg_gen_shli_tl(low
, r1
, shift_count
);
1194 tcg_gen_shli_tl(ret
, high
, shift_count
);
1195 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
1198 tcg_temp_free(high
);
1200 low
= tcg_temp_new();
1201 high
= tcg_temp_new();
1203 tcg_gen_ext16s_tl(low
, r1
);
1204 tcg_gen_sari_tl(low
, low
, -shift_count
);
1205 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
1206 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
1209 tcg_temp_free(high
);
1214 /* ret = {ret[30:0], (r1 cond r2)}; */
1215 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
1217 TCGv temp
= tcg_temp_new();
1218 TCGv temp2
= tcg_temp_new();
1220 tcg_gen_shli_tl(temp
, ret
, 1);
1221 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
1222 tcg_gen_or_tl(ret
, temp
, temp2
);
1224 tcg_temp_free(temp
);
1225 tcg_temp_free(temp2
);
1228 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
1230 TCGv temp
= tcg_const_i32(con
);
1231 gen_sh_cond(cond
, ret
, r1
, temp
);
1232 tcg_temp_free(temp
);
1235 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
1237 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
1240 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
1242 TCGv temp
= tcg_const_i32(con
);
1243 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
1244 tcg_temp_free(temp
);
1247 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
1249 TCGv temp
= tcg_const_i32(con
);
1250 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
1251 tcg_temp_free(temp
);
1254 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
1256 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
1259 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
1261 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
1264 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
1266 void(*op1
)(TCGv
, TCGv
, TCGv
),
1267 void(*op2
)(TCGv
, TCGv
, TCGv
))
1271 temp1
= tcg_temp_new();
1272 temp2
= tcg_temp_new();
1274 tcg_gen_shri_tl(temp2
, r2
, pos2
);
1275 tcg_gen_shri_tl(temp1
, r1
, pos1
);
1277 (*op1
)(temp1
, temp1
, temp2
);
1278 (*op2
)(temp1
, ret
, temp1
);
1280 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
1282 tcg_temp_free(temp1
);
1283 tcg_temp_free(temp2
);
1286 /* ret = r1[pos1] op1 r2[pos2]; */
1287 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
1289 void(*op1
)(TCGv
, TCGv
, TCGv
))
1293 temp1
= tcg_temp_new();
1294 temp2
= tcg_temp_new();
1296 tcg_gen_shri_tl(temp2
, r2
, pos2
);
1297 tcg_gen_shri_tl(temp1
, r1
, pos1
);
1299 (*op1
)(ret
, temp1
, temp2
);
1301 tcg_gen_andi_tl(ret
, ret
, 0x1);
1303 tcg_temp_free(temp1
);
1304 tcg_temp_free(temp2
);
1307 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
1308 void(*op
)(TCGv
, TCGv
, TCGv
))
1310 TCGv temp
= tcg_temp_new();
1311 TCGv temp2
= tcg_temp_new();
1312 /* temp = (arg1 cond arg2 )*/
1313 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
1315 tcg_gen_andi_tl(temp2
, ret
, 0x1);
1316 /* temp = temp insn temp2 */
1317 (*op
)(temp
, temp
, temp2
);
1318 /* ret = {ret[31:1], temp} */
1319 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
1321 tcg_temp_free(temp
);
1322 tcg_temp_free(temp2
);
1326 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
1327 void(*op
)(TCGv
, TCGv
, TCGv
))
1329 TCGv temp
= tcg_const_i32(con
);
1330 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
1331 tcg_temp_free(temp
);
1334 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
1335 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
1337 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
1338 tcg_gen_neg_tl(ret
, ret
);
1341 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
1343 TCGv b0
= tcg_temp_new();
1344 TCGv b1
= tcg_temp_new();
1345 TCGv b2
= tcg_temp_new();
1346 TCGv b3
= tcg_temp_new();
1349 tcg_gen_andi_tl(b0
, r1
, 0xff);
1350 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
1353 tcg_gen_andi_tl(b1
, r1
, 0xff00);
1354 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
1357 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
1358 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
1361 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
1362 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
1365 tcg_gen_or_tl(ret
, b0
, b1
);
1366 tcg_gen_or_tl(ret
, ret
, b2
);
1367 tcg_gen_or_tl(ret
, ret
, b3
);
1375 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
1377 TCGv h0
= tcg_temp_new();
1378 TCGv h1
= tcg_temp_new();
1381 tcg_gen_andi_tl(h0
, r1
, 0xffff);
1382 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
1385 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
1386 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
1389 tcg_gen_or_tl(ret
, h0
, h1
);
1394 /* mask = ((1 << width) -1) << pos;
1395 ret = (r1 & ~mask) | (r2 << pos) & mask); */
1396 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
1398 TCGv mask
= tcg_temp_new();
1399 TCGv temp
= tcg_temp_new();
1400 TCGv temp2
= tcg_temp_new();
1402 tcg_gen_movi_tl(mask
, 1);
1403 tcg_gen_shl_tl(mask
, mask
, width
);
1404 tcg_gen_subi_tl(mask
, mask
, 1);
1405 tcg_gen_shl_tl(mask
, mask
, pos
);
1407 tcg_gen_shl_tl(temp
, r2
, pos
);
1408 tcg_gen_and_tl(temp
, temp
, mask
);
1409 tcg_gen_andc_tl(temp2
, r1
, mask
);
1410 tcg_gen_or_tl(ret
, temp
, temp2
);
1412 tcg_temp_free(mask
);
1413 tcg_temp_free(temp
);
1414 tcg_temp_free(temp2
);
1417 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
1419 TCGv_i64 temp
= tcg_temp_new_i64();
1421 gen_helper_bsplit(temp
, r1
);
1422 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
1424 tcg_temp_free_i64(temp
);
1427 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
1429 TCGv_i64 temp
= tcg_temp_new_i64();
1431 gen_helper_unpack(temp
, r1
);
1432 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
1434 tcg_temp_free_i64(temp
);
1438 gen_dvinit_b(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
1440 TCGv_i64 ret
= tcg_temp_new_i64();
1442 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
1443 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
1445 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
1447 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
1449 tcg_temp_free_i64(ret
);
1453 gen_dvinit_h(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
1455 TCGv_i64 ret
= tcg_temp_new_i64();
1457 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
1458 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
1460 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
1462 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
1464 tcg_temp_free_i64(ret
);
1467 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
1469 TCGv temp
= tcg_temp_new();
1471 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
1472 tcg_gen_xor_tl(temp
, temp
, arg_low
);
1473 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
1474 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
1475 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
1477 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1478 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1479 tcg_temp_free(temp
);
1482 static void gen_calc_usb_mulr_h(TCGv arg
)
1484 TCGv temp
= tcg_temp_new();
1486 tcg_gen_add_tl(temp
, arg
, arg
);
1487 tcg_gen_xor_tl(temp
, temp
, arg
);
1488 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
1489 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
1491 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1493 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1494 tcg_temp_free(temp
);
1497 /* helpers for generating program flow micro-ops */
1499 static inline void gen_save_pc(target_ulong pc
)
1501 tcg_gen_movi_tl(cpu_PC
, pc
);
1504 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
1506 TranslationBlock
*tb
;
1508 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
1509 likely(!ctx
->singlestep_enabled
)) {
1512 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
1515 if (ctx
->singlestep_enabled
) {
1516 /* raise exception debug */
1522 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
1523 TCGv r2
, int16_t address
)
1526 jumpLabel
= gen_new_label();
1527 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
1529 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
1531 gen_set_label(jumpLabel
);
1532 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
1535 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
1536 int r2
, int16_t address
)
1538 TCGv temp
= tcg_const_i32(r2
);
1539 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
1540 tcg_temp_free(temp
);
1543 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
1546 l1
= gen_new_label();
1548 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
1549 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
1550 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
1552 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
1555 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
1556 int r2
, int32_t constant
, int32_t offset
)
1562 /* SB-format jumps */
1565 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
1567 case OPC1_32_B_CALL
:
1568 case OPC1_16_SB_CALL
:
1569 gen_helper_1arg(call
, ctx
->next_pc
);
1570 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
1573 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
1575 case OPC1_16_SB_JNZ
:
1576 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
1578 /* SBC-format jumps */
1579 case OPC1_16_SBC_JEQ
:
1580 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
1582 case OPC1_16_SBC_JNE
:
1583 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
1585 /* SBRN-format jumps */
1586 case OPC1_16_SBRN_JZ_T
:
1587 temp
= tcg_temp_new();
1588 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
1589 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
1590 tcg_temp_free(temp
);
1592 case OPC1_16_SBRN_JNZ_T
:
1593 temp
= tcg_temp_new();
1594 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
1595 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
1596 tcg_temp_free(temp
);
1598 /* SBR-format jumps */
1599 case OPC1_16_SBR_JEQ
:
1600 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
1603 case OPC1_16_SBR_JNE
:
1604 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
1607 case OPC1_16_SBR_JNZ
:
1608 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
1610 case OPC1_16_SBR_JNZ_A
:
1611 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
1613 case OPC1_16_SBR_JGEZ
:
1614 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
1616 case OPC1_16_SBR_JGTZ
:
1617 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
1619 case OPC1_16_SBR_JLEZ
:
1620 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
1622 case OPC1_16_SBR_JLTZ
:
1623 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
1625 case OPC1_16_SBR_JZ
:
1626 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
1628 case OPC1_16_SBR_JZ_A
:
1629 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
1631 case OPC1_16_SBR_LOOP
:
1632 gen_loop(ctx
, r1
, offset
* 2 - 32);
1634 /* SR-format jumps */
1636 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
1639 case OPC2_16_SR_RET
:
1640 gen_helper_ret(cpu_env
);
1644 case OPC1_32_B_CALLA
:
1645 gen_helper_1arg(call
, ctx
->next_pc
);
1646 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
1649 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
1651 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
1654 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
1655 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
1658 case OPCM_32_BRC_EQ_NEQ
:
1659 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
1660 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
1662 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
1665 case OPCM_32_BRC_GE
:
1666 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
1667 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
1669 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
1670 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
1674 case OPCM_32_BRC_JLT
:
1675 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
1676 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
1678 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
1679 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
1683 case OPCM_32_BRC_JNE
:
1684 temp
= tcg_temp_new();
1685 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
1686 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
1687 /* subi is unconditional */
1688 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
1689 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
1691 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
1692 /* addi is unconditional */
1693 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
1694 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
1696 tcg_temp_free(temp
);
1699 case OPCM_32_BRN_JTT
:
1700 n
= MASK_OP_BRN_N(ctx
->opcode
);
1702 temp
= tcg_temp_new();
1703 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
1705 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
1706 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
1708 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
1710 tcg_temp_free(temp
);
1713 case OPCM_32_BRR_EQ_NEQ
:
1714 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
1715 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1718 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1722 case OPCM_32_BRR_ADDR_EQ_NEQ
:
1723 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
1724 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
1727 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
1731 case OPCM_32_BRR_GE
:
1732 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
1733 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1736 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1740 case OPCM_32_BRR_JLT
:
1741 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
1742 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1745 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
1749 case OPCM_32_BRR_LOOP
:
1750 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
1751 gen_loop(ctx
, r1
, offset
* 2);
1753 /* OPC2_32_BRR_LOOPU */
1754 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
1757 case OPCM_32_BRR_JNE
:
1758 temp
= tcg_temp_new();
1759 temp2
= tcg_temp_new();
1760 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
1761 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
1762 /* also save r2, in case of r1 == r2, so r2 is not decremented */
1763 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
1764 /* subi is unconditional */
1765 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
1766 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
1768 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
1769 /* also save r2, in case of r1 == r2, so r2 is not decremented */
1770 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
1771 /* addi is unconditional */
1772 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
1773 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
1775 tcg_temp_free(temp
);
1776 tcg_temp_free(temp2
);
1778 case OPCM_32_BRR_JNZ
:
1779 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
1780 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
1782 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
1786 printf("Branch Error at %x\n", ctx
->pc
);
1788 ctx
->bstate
= BS_BRANCH
;
1793 * Functions for decoding instructions
1796 static void decode_src_opc(DisasContext
*ctx
, int op1
)
1802 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
1803 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
1806 case OPC1_16_SRC_ADD
:
1807 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
1809 case OPC1_16_SRC_ADD_A15
:
1810 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
1812 case OPC1_16_SRC_ADD_15A
:
1813 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
1815 case OPC1_16_SRC_ADD_A
:
1816 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
1818 case OPC1_16_SRC_CADD
:
1819 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
1822 case OPC1_16_SRC_CADDN
:
1823 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
1826 case OPC1_16_SRC_CMOV
:
1827 temp
= tcg_const_tl(0);
1828 temp2
= tcg_const_tl(const4
);
1829 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
1830 temp2
, cpu_gpr_d
[r1
]);
1831 tcg_temp_free(temp
);
1832 tcg_temp_free(temp2
);
1834 case OPC1_16_SRC_CMOVN
:
1835 temp
= tcg_const_tl(0);
1836 temp2
= tcg_const_tl(const4
);
1837 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
1838 temp2
, cpu_gpr_d
[r1
]);
1839 tcg_temp_free(temp
);
1840 tcg_temp_free(temp2
);
1842 case OPC1_16_SRC_EQ
:
1843 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
1846 case OPC1_16_SRC_LT
:
1847 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
1850 case OPC1_16_SRC_MOV
:
1851 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
1853 case OPC1_16_SRC_MOV_A
:
1854 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
1855 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
1857 case OPC1_16_SRC_SH
:
1858 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
1860 case OPC1_16_SRC_SHA
:
1861 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
1866 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
1871 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
1872 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
1875 case OPC1_16_SRR_ADD
:
1876 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1878 case OPC1_16_SRR_ADD_A15
:
1879 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
1881 case OPC1_16_SRR_ADD_15A
:
1882 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1884 case OPC1_16_SRR_ADD_A
:
1885 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
1887 case OPC1_16_SRR_ADDS
:
1888 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1890 case OPC1_16_SRR_AND
:
1891 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1893 case OPC1_16_SRR_CMOV
:
1894 temp
= tcg_const_tl(0);
1895 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
1896 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
1897 tcg_temp_free(temp
);
1899 case OPC1_16_SRR_CMOVN
:
1900 temp
= tcg_const_tl(0);
1901 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
1902 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
1903 tcg_temp_free(temp
);
1905 case OPC1_16_SRR_EQ
:
1906 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
1909 case OPC1_16_SRR_LT
:
1910 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
1913 case OPC1_16_SRR_MOV
:
1914 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1916 case OPC1_16_SRR_MOV_A
:
1917 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
1919 case OPC1_16_SRR_MOV_AA
:
1920 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
1922 case OPC1_16_SRR_MOV_D
:
1923 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
1925 case OPC1_16_SRR_MUL
:
1926 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1928 case OPC1_16_SRR_OR
:
1929 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1931 case OPC1_16_SRR_SUB
:
1932 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1934 case OPC1_16_SRR_SUB_A15B
:
1935 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
1937 case OPC1_16_SRR_SUB_15AB
:
1938 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1940 case OPC1_16_SRR_SUBS
:
1941 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1943 case OPC1_16_SRR_XOR
:
1944 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
1949 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
1953 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
1954 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
1957 case OPC1_16_SSR_ST_A
:
1958 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
1960 case OPC1_16_SSR_ST_A_POSTINC
:
1961 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
1962 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
1964 case OPC1_16_SSR_ST_B
:
1965 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
1967 case OPC1_16_SSR_ST_B_POSTINC
:
1968 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
1969 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
1971 case OPC1_16_SSR_ST_H
:
1972 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
1974 case OPC1_16_SSR_ST_H_POSTINC
:
1975 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
1976 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
1978 case OPC1_16_SSR_ST_W
:
1979 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
1981 case OPC1_16_SSR_ST_W_POSTINC
:
1982 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
1983 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
1988 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
1992 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
1995 case OPC1_16_SC_AND
:
1996 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
1998 case OPC1_16_SC_BISR
:
1999 gen_helper_1arg(bisr
, const16
& 0xff);
2001 case OPC1_16_SC_LD_A
:
2002 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2004 case OPC1_16_SC_LD_W
:
2005 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2007 case OPC1_16_SC_MOV
:
2008 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
2011 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
2013 case OPC1_16_SC_ST_A
:
2014 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2016 case OPC1_16_SC_ST_W
:
2017 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2019 case OPC1_16_SC_SUB_A
:
2020 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
2025 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
2029 r1
= MASK_OP_SLR_D(ctx
->opcode
);
2030 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
2034 case OPC1_16_SLR_LD_A
:
2035 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
2037 case OPC1_16_SLR_LD_A_POSTINC
:
2038 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
2039 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
2041 case OPC1_16_SLR_LD_BU
:
2042 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
2044 case OPC1_16_SLR_LD_BU_POSTINC
:
2045 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
2046 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
2048 case OPC1_16_SLR_LD_H
:
2049 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2051 case OPC1_16_SLR_LD_H_POSTINC
:
2052 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2053 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
2055 case OPC1_16_SLR_LD_W
:
2056 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2058 case OPC1_16_SLR_LD_W_POSTINC
:
2059 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2060 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
2065 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
2070 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
2071 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
2075 case OPC1_16_SRO_LD_A
:
2076 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
2078 case OPC1_16_SRO_LD_BU
:
2079 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
2081 case OPC1_16_SRO_LD_H
:
2082 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
2084 case OPC1_16_SRO_LD_W
:
2085 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
2087 case OPC1_16_SRO_ST_A
:
2088 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
2090 case OPC1_16_SRO_ST_B
:
2091 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
2093 case OPC1_16_SRO_ST_H
:
2094 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
2096 case OPC1_16_SRO_ST_W
:
2097 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
2102 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
2105 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
2108 case OPC2_16_SR_NOP
:
2110 case OPC2_16_SR_RET
:
2111 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
2113 case OPC2_16_SR_RFE
:
2114 gen_helper_rfe(cpu_env
);
2116 ctx
->bstate
= BS_BRANCH
;
2118 case OPC2_16_SR_DEBUG
:
2119 /* raise EXCP_DEBUG */
2124 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
2130 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
2131 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
2134 case OPC2_16_SR_RSUB
:
2135 /* overflow only if r1 = -0x80000000 */
2136 temp
= tcg_const_i32(-0x80000000);
2138 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
2139 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2141 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2143 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
2145 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
2146 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
2148 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2149 tcg_temp_free(temp
);
2151 case OPC2_16_SR_SAT_B
:
2152 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
2154 case OPC2_16_SR_SAT_BU
:
2155 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
2157 case OPC2_16_SR_SAT_H
:
2158 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
2160 case OPC2_16_SR_SAT_HU
:
2161 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
2166 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
2174 op1
= MASK_OP_MAJOR(ctx
->opcode
);
2176 /* handle ADDSC.A opcode only being 6 bit long */
2177 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
2178 op1
= OPC1_16_SRRS_ADDSC_A
;
2182 case OPC1_16_SRC_ADD
:
2183 case OPC1_16_SRC_ADD_A15
:
2184 case OPC1_16_SRC_ADD_15A
:
2185 case OPC1_16_SRC_ADD_A
:
2186 case OPC1_16_SRC_CADD
:
2187 case OPC1_16_SRC_CADDN
:
2188 case OPC1_16_SRC_CMOV
:
2189 case OPC1_16_SRC_CMOVN
:
2190 case OPC1_16_SRC_EQ
:
2191 case OPC1_16_SRC_LT
:
2192 case OPC1_16_SRC_MOV
:
2193 case OPC1_16_SRC_MOV_A
:
2194 case OPC1_16_SRC_SH
:
2195 case OPC1_16_SRC_SHA
:
2196 decode_src_opc(ctx
, op1
);
2199 case OPC1_16_SRR_ADD
:
2200 case OPC1_16_SRR_ADD_A15
:
2201 case OPC1_16_SRR_ADD_15A
:
2202 case OPC1_16_SRR_ADD_A
:
2203 case OPC1_16_SRR_ADDS
:
2204 case OPC1_16_SRR_AND
:
2205 case OPC1_16_SRR_CMOV
:
2206 case OPC1_16_SRR_CMOVN
:
2207 case OPC1_16_SRR_EQ
:
2208 case OPC1_16_SRR_LT
:
2209 case OPC1_16_SRR_MOV
:
2210 case OPC1_16_SRR_MOV_A
:
2211 case OPC1_16_SRR_MOV_AA
:
2212 case OPC1_16_SRR_MOV_D
:
2213 case OPC1_16_SRR_MUL
:
2214 case OPC1_16_SRR_OR
:
2215 case OPC1_16_SRR_SUB
:
2216 case OPC1_16_SRR_SUB_A15B
:
2217 case OPC1_16_SRR_SUB_15AB
:
2218 case OPC1_16_SRR_SUBS
:
2219 case OPC1_16_SRR_XOR
:
2220 decode_srr_opc(ctx
, op1
);
2223 case OPC1_16_SSR_ST_A
:
2224 case OPC1_16_SSR_ST_A_POSTINC
:
2225 case OPC1_16_SSR_ST_B
:
2226 case OPC1_16_SSR_ST_B_POSTINC
:
2227 case OPC1_16_SSR_ST_H
:
2228 case OPC1_16_SSR_ST_H_POSTINC
:
2229 case OPC1_16_SSR_ST_W
:
2230 case OPC1_16_SSR_ST_W_POSTINC
:
2231 decode_ssr_opc(ctx
, op1
);
2234 case OPC1_16_SRRS_ADDSC_A
:
2235 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
2236 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
2237 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
2238 temp
= tcg_temp_new();
2239 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
2240 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
2241 tcg_temp_free(temp
);
2244 case OPC1_16_SLRO_LD_A
:
2245 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2246 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2247 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2249 case OPC1_16_SLRO_LD_BU
:
2250 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2251 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2252 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
2254 case OPC1_16_SLRO_LD_H
:
2255 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2256 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2257 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
2259 case OPC1_16_SLRO_LD_W
:
2260 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2261 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2262 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2265 case OPC1_16_SB_CALL
:
2267 case OPC1_16_SB_JNZ
:
2269 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
2270 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
2273 case OPC1_16_SBC_JEQ
:
2274 case OPC1_16_SBC_JNE
:
2275 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
2276 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
2277 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
2280 case OPC1_16_SBRN_JNZ_T
:
2281 case OPC1_16_SBRN_JZ_T
:
2282 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
2283 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
2284 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
2287 case OPC1_16_SBR_JEQ
:
2288 case OPC1_16_SBR_JGEZ
:
2289 case OPC1_16_SBR_JGTZ
:
2290 case OPC1_16_SBR_JLEZ
:
2291 case OPC1_16_SBR_JLTZ
:
2292 case OPC1_16_SBR_JNE
:
2293 case OPC1_16_SBR_JNZ
:
2294 case OPC1_16_SBR_JNZ_A
:
2295 case OPC1_16_SBR_JZ
:
2296 case OPC1_16_SBR_JZ_A
:
2297 case OPC1_16_SBR_LOOP
:
2298 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
2299 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
2300 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
2303 case OPC1_16_SC_AND
:
2304 case OPC1_16_SC_BISR
:
2305 case OPC1_16_SC_LD_A
:
2306 case OPC1_16_SC_LD_W
:
2307 case OPC1_16_SC_MOV
:
2309 case OPC1_16_SC_ST_A
:
2310 case OPC1_16_SC_ST_W
:
2311 case OPC1_16_SC_SUB_A
:
2312 decode_sc_opc(ctx
, op1
);
2315 case OPC1_16_SLR_LD_A
:
2316 case OPC1_16_SLR_LD_A_POSTINC
:
2317 case OPC1_16_SLR_LD_BU
:
2318 case OPC1_16_SLR_LD_BU_POSTINC
:
2319 case OPC1_16_SLR_LD_H
:
2320 case OPC1_16_SLR_LD_H_POSTINC
:
2321 case OPC1_16_SLR_LD_W
:
2322 case OPC1_16_SLR_LD_W_POSTINC
:
2323 decode_slr_opc(ctx
, op1
);
2326 case OPC1_16_SRO_LD_A
:
2327 case OPC1_16_SRO_LD_BU
:
2328 case OPC1_16_SRO_LD_H
:
2329 case OPC1_16_SRO_LD_W
:
2330 case OPC1_16_SRO_ST_A
:
2331 case OPC1_16_SRO_ST_B
:
2332 case OPC1_16_SRO_ST_H
:
2333 case OPC1_16_SRO_ST_W
:
2334 decode_sro_opc(ctx
, op1
);
2337 case OPC1_16_SSRO_ST_A
:
2338 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2339 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2340 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2342 case OPC1_16_SSRO_ST_B
:
2343 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2344 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2345 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
2347 case OPC1_16_SSRO_ST_H
:
2348 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2349 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2350 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
2352 case OPC1_16_SSRO_ST_W
:
2353 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2354 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2355 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2358 case OPCM_16_SR_SYSTEM
:
2359 decode_sr_system(env
, ctx
);
2361 case OPCM_16_SR_ACCU
:
2362 decode_sr_accu(env
, ctx
);
2365 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
2366 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
2368 case OPC1_16_SR_NOT
:
2369 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
2370 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
2376 * 32 bit instructions
2380 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
2387 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2388 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2389 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2391 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2394 case OPC2_32_ABS_LD_A
:
2395 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
2397 case OPC2_32_ABS_LD_D
:
2398 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
2400 case OPC2_32_ABS_LD_DA
:
2401 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
2403 case OPC2_32_ABS_LD_W
:
2404 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
2408 tcg_temp_free(temp
);
2411 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
2418 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2419 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2420 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2422 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2425 case OPC2_32_ABS_LD_B
:
2426 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
2428 case OPC2_32_ABS_LD_BU
:
2429 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
2431 case OPC2_32_ABS_LD_H
:
2432 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
2434 case OPC2_32_ABS_LD_HU
:
2435 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
2439 tcg_temp_free(temp
);
2442 static void decode_abs_ldst_swap(CPUTriCoreState
*env
, DisasContext
*ctx
)
2449 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2450 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2451 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2453 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2456 case OPC2_32_ABS_LDMST
:
2457 gen_ldmst(ctx
, r1
, temp
);
2459 case OPC2_32_ABS_SWAP_W
:
2460 gen_swap(ctx
, r1
, temp
);
2464 tcg_temp_free(temp
);
2467 static void decode_abs_ldst_context(CPUTriCoreState
*env
, DisasContext
*ctx
)
2472 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2473 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2476 case OPC2_32_ABS_LDLCX
:
2477 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
2479 case OPC2_32_ABS_LDUCX
:
2480 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
2482 case OPC2_32_ABS_STLCX
:
2483 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
2485 case OPC2_32_ABS_STUCX
:
2486 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
2491 static void decode_abs_store(CPUTriCoreState
*env
, DisasContext
*ctx
)
2498 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2499 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2500 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2502 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2505 case OPC2_32_ABS_ST_A
:
2506 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
2508 case OPC2_32_ABS_ST_D
:
2509 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
2511 case OPC2_32_ABS_ST_DA
:
2512 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
2514 case OPC2_32_ABS_ST_W
:
2515 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
2519 tcg_temp_free(temp
);
2522 static void decode_abs_storeb_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
2529 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2530 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2531 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2533 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2536 case OPC2_32_ABS_ST_B
:
2537 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
2539 case OPC2_32_ABS_ST_H
:
2540 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
2543 tcg_temp_free(temp
);
2548 static void decode_bit_andacc(CPUTriCoreState
*env
, DisasContext
*ctx
)
2554 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2555 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2556 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2557 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2558 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2559 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2563 case OPC2_32_BIT_AND_AND_T
:
2564 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2565 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
2567 case OPC2_32_BIT_AND_ANDN_T
:
2568 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2569 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
2571 case OPC2_32_BIT_AND_NOR_T
:
2572 if (TCG_TARGET_HAS_andc_i32
) {
2573 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2574 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
2576 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2577 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
2580 case OPC2_32_BIT_AND_OR_T
:
2581 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2582 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
2587 static void decode_bit_logical_t(CPUTriCoreState
*env
, DisasContext
*ctx
)
2592 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2593 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2594 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2595 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2596 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2597 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2600 case OPC2_32_BIT_AND_T
:
2601 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2602 pos1
, pos2
, &tcg_gen_and_tl
);
2604 case OPC2_32_BIT_ANDN_T
:
2605 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2606 pos1
, pos2
, &tcg_gen_andc_tl
);
2608 case OPC2_32_BIT_NOR_T
:
2609 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2610 pos1
, pos2
, &tcg_gen_nor_tl
);
2612 case OPC2_32_BIT_OR_T
:
2613 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2614 pos1
, pos2
, &tcg_gen_or_tl
);
2619 static void decode_bit_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
2625 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2626 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2627 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2628 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2629 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2630 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2632 temp
= tcg_temp_new();
2634 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
2635 if (op2
== OPC2_32_BIT_INSN_T
) {
2636 tcg_gen_not_tl(temp
, temp
);
2638 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
2639 tcg_temp_free(temp
);
2642 static void decode_bit_logical_t2(CPUTriCoreState
*env
, DisasContext
*ctx
)
2649 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2650 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2651 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2652 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2653 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2654 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2657 case OPC2_32_BIT_NAND_T
:
2658 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2659 pos1
, pos2
, &tcg_gen_nand_tl
);
2661 case OPC2_32_BIT_ORN_T
:
2662 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2663 pos1
, pos2
, &tcg_gen_orc_tl
);
2665 case OPC2_32_BIT_XNOR_T
:
2666 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2667 pos1
, pos2
, &tcg_gen_eqv_tl
);
2669 case OPC2_32_BIT_XOR_T
:
2670 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2671 pos1
, pos2
, &tcg_gen_xor_tl
);
2676 static void decode_bit_orand(CPUTriCoreState
*env
, DisasContext
*ctx
)
2683 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2684 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2685 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2686 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2687 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2688 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2691 case OPC2_32_BIT_OR_AND_T
:
2692 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2693 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
2695 case OPC2_32_BIT_OR_ANDN_T
:
2696 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2697 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
2699 case OPC2_32_BIT_OR_NOR_T
:
2700 if (TCG_TARGET_HAS_orc_i32
) {
2701 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2702 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
2704 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2705 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
2708 case OPC2_32_BIT_OR_OR_T
:
2709 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2710 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
2715 static void decode_bit_sh_logic1(CPUTriCoreState
*env
, DisasContext
*ctx
)
2722 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2723 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2724 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2725 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2726 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2727 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2729 temp
= tcg_temp_new();
2732 case OPC2_32_BIT_SH_AND_T
:
2733 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2734 pos1
, pos2
, &tcg_gen_and_tl
);
2736 case OPC2_32_BIT_SH_ANDN_T
:
2737 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2738 pos1
, pos2
, &tcg_gen_andc_tl
);
2740 case OPC2_32_BIT_SH_NOR_T
:
2741 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2742 pos1
, pos2
, &tcg_gen_nor_tl
);
2744 case OPC2_32_BIT_SH_OR_T
:
2745 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2746 pos1
, pos2
, &tcg_gen_or_tl
);
2749 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
2750 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
2751 tcg_temp_free(temp
);
2754 static void decode_bit_sh_logic2(CPUTriCoreState
*env
, DisasContext
*ctx
)
2761 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
2762 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
2763 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
2764 r3
= MASK_OP_BIT_D(ctx
->opcode
);
2765 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
2766 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
2768 temp
= tcg_temp_new();
2771 case OPC2_32_BIT_SH_NAND_T
:
2772 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
2773 pos1
, pos2
, &tcg_gen_nand_tl
);
2775 case OPC2_32_BIT_SH_ORN_T
:
2776 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2777 pos1
, pos2
, &tcg_gen_orc_tl
);
2779 case OPC2_32_BIT_SH_XNOR_T
:
2780 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2781 pos1
, pos2
, &tcg_gen_eqv_tl
);
2783 case OPC2_32_BIT_SH_XOR_T
:
2784 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2785 pos1
, pos2
, &tcg_gen_xor_tl
);
2788 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
2789 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
2790 tcg_temp_free(temp
);
2796 static void decode_bo_addrmode_post_pre_base(CPUTriCoreState
*env
,
2804 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
2805 r2
= MASK_OP_BO_S2(ctx
->opcode
);
2806 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
2807 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
2810 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
2811 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
2812 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
2813 /* instruction to access the cache */
2815 case OPC2_32_BO_CACHEA_WI_POSTINC
:
2816 case OPC2_32_BO_CACHEA_W_POSTINC
:
2817 case OPC2_32_BO_CACHEA_I_POSTINC
:
2818 /* instruction to access the cache, but we still need to handle
2819 the addressing mode */
2820 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
2822 case OPC2_32_BO_CACHEA_WI_PREINC
:
2823 case OPC2_32_BO_CACHEA_W_PREINC
:
2824 case OPC2_32_BO_CACHEA_I_PREINC
:
2825 /* instruction to access the cache, but we still need to handle
2826 the addressing mode */
2827 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
2829 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
2830 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
2831 /* TODO: Raise illegal opcode trap,
2832 if !tricore_feature(TRICORE_FEATURE_131) */
2834 case OPC2_32_BO_CACHEI_W_POSTINC
:
2835 case OPC2_32_BO_CACHEI_WI_POSTINC
:
2836 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
2837 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
2838 } /* TODO: else raise illegal opcode trap */
2840 case OPC2_32_BO_CACHEI_W_PREINC
:
2841 case OPC2_32_BO_CACHEI_WI_PREINC
:
2842 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
2843 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
2844 } /* TODO: else raise illegal opcode trap */
2846 case OPC2_32_BO_ST_A_SHORTOFF
:
2847 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
2849 case OPC2_32_BO_ST_A_POSTINC
:
2850 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2852 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2854 case OPC2_32_BO_ST_A_PREINC
:
2855 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
2857 case OPC2_32_BO_ST_B_SHORTOFF
:
2858 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
2860 case OPC2_32_BO_ST_B_POSTINC
:
2861 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2863 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2865 case OPC2_32_BO_ST_B_PREINC
:
2866 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
2868 case OPC2_32_BO_ST_D_SHORTOFF
:
2869 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
2872 case OPC2_32_BO_ST_D_POSTINC
:
2873 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
2874 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2876 case OPC2_32_BO_ST_D_PREINC
:
2877 temp
= tcg_temp_new();
2878 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2879 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
2880 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
2881 tcg_temp_free(temp
);
2883 case OPC2_32_BO_ST_DA_SHORTOFF
:
2884 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
2887 case OPC2_32_BO_ST_DA_POSTINC
:
2888 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
2889 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2891 case OPC2_32_BO_ST_DA_PREINC
:
2892 temp
= tcg_temp_new();
2893 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
2894 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
2895 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
2896 tcg_temp_free(temp
);
2898 case OPC2_32_BO_ST_H_SHORTOFF
:
2899 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2901 case OPC2_32_BO_ST_H_POSTINC
:
2902 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2904 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2906 case OPC2_32_BO_ST_H_PREINC
:
2907 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2909 case OPC2_32_BO_ST_Q_SHORTOFF
:
2910 temp
= tcg_temp_new();
2911 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
2912 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2913 tcg_temp_free(temp
);
2915 case OPC2_32_BO_ST_Q_POSTINC
:
2916 temp
= tcg_temp_new();
2917 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
2918 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
2920 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2921 tcg_temp_free(temp
);
2923 case OPC2_32_BO_ST_Q_PREINC
:
2924 temp
= tcg_temp_new();
2925 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
2926 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
2927 tcg_temp_free(temp
);
2929 case OPC2_32_BO_ST_W_SHORTOFF
:
2930 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
2932 case OPC2_32_BO_ST_W_POSTINC
:
2933 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
2935 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
2937 case OPC2_32_BO_ST_W_PREINC
:
2938 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
2943 static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState
*env
,
2949 TCGv temp
, temp2
, temp3
;
2951 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
2952 r2
= MASK_OP_BO_S2(ctx
->opcode
);
2953 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
2954 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
2956 temp
= tcg_temp_new();
2957 temp2
= tcg_temp_new();
2958 temp3
= tcg_const_i32(off10
);
2960 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
2961 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
2964 case OPC2_32_BO_CACHEA_WI_BR
:
2965 case OPC2_32_BO_CACHEA_W_BR
:
2966 case OPC2_32_BO_CACHEA_I_BR
:
2967 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2969 case OPC2_32_BO_CACHEA_WI_CIRC
:
2970 case OPC2_32_BO_CACHEA_W_CIRC
:
2971 case OPC2_32_BO_CACHEA_I_CIRC
:
2972 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2974 case OPC2_32_BO_ST_A_BR
:
2975 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2976 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2978 case OPC2_32_BO_ST_A_CIRC
:
2979 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2980 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2982 case OPC2_32_BO_ST_B_BR
:
2983 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
2984 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2986 case OPC2_32_BO_ST_B_CIRC
:
2987 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
2988 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
2990 case OPC2_32_BO_ST_D_BR
:
2991 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
2992 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
2994 case OPC2_32_BO_ST_D_CIRC
:
2995 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
2996 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
2997 tcg_gen_addi_tl(temp
, temp
, 4);
2998 tcg_gen_rem_tl(temp
, temp
, temp2
);
2999 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3000 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3001 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3003 case OPC2_32_BO_ST_DA_BR
:
3004 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
3005 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3007 case OPC2_32_BO_ST_DA_CIRC
:
3008 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3009 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3010 tcg_gen_addi_tl(temp
, temp
, 4);
3011 tcg_gen_rem_tl(temp
, temp
, temp2
);
3012 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3013 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3014 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3016 case OPC2_32_BO_ST_H_BR
:
3017 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3018 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3020 case OPC2_32_BO_ST_H_CIRC
:
3021 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3022 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3024 case OPC2_32_BO_ST_Q_BR
:
3025 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3026 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
3027 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3029 case OPC2_32_BO_ST_Q_CIRC
:
3030 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3031 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
3032 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3034 case OPC2_32_BO_ST_W_BR
:
3035 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3036 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3038 case OPC2_32_BO_ST_W_CIRC
:
3039 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3040 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3043 tcg_temp_free(temp
);
3044 tcg_temp_free(temp2
);
3045 tcg_temp_free(temp3
);
3048 static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState
*env
,
3056 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3057 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3058 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3059 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3062 case OPC2_32_BO_LD_A_SHORTOFF
:
3063 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3065 case OPC2_32_BO_LD_A_POSTINC
:
3066 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3068 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3070 case OPC2_32_BO_LD_A_PREINC
:
3071 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3073 case OPC2_32_BO_LD_B_SHORTOFF
:
3074 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
3076 case OPC2_32_BO_LD_B_POSTINC
:
3077 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3079 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3081 case OPC2_32_BO_LD_B_PREINC
:
3082 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
3084 case OPC2_32_BO_LD_BU_SHORTOFF
:
3085 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
3087 case OPC2_32_BO_LD_BU_POSTINC
:
3088 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3090 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3092 case OPC2_32_BO_LD_BU_PREINC
:
3093 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
3095 case OPC2_32_BO_LD_D_SHORTOFF
:
3096 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
3099 case OPC2_32_BO_LD_D_POSTINC
:
3100 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
3101 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3103 case OPC2_32_BO_LD_D_PREINC
:
3104 temp
= tcg_temp_new();
3105 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3106 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3107 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
3108 tcg_temp_free(temp
);
3110 case OPC2_32_BO_LD_DA_SHORTOFF
:
3111 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3114 case OPC2_32_BO_LD_DA_POSTINC
:
3115 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
3116 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3118 case OPC2_32_BO_LD_DA_PREINC
:
3119 temp
= tcg_temp_new();
3120 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3121 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3122 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
3123 tcg_temp_free(temp
);
3125 case OPC2_32_BO_LD_H_SHORTOFF
:
3126 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
3128 case OPC2_32_BO_LD_H_POSTINC
:
3129 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3131 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3133 case OPC2_32_BO_LD_H_PREINC
:
3134 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
3136 case OPC2_32_BO_LD_HU_SHORTOFF
:
3137 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3139 case OPC2_32_BO_LD_HU_POSTINC
:
3140 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3142 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3144 case OPC2_32_BO_LD_HU_PREINC
:
3145 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3147 case OPC2_32_BO_LD_Q_SHORTOFF
:
3148 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3149 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3151 case OPC2_32_BO_LD_Q_POSTINC
:
3152 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3154 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3155 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3157 case OPC2_32_BO_LD_Q_PREINC
:
3158 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3159 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3161 case OPC2_32_BO_LD_W_SHORTOFF
:
3162 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3164 case OPC2_32_BO_LD_W_POSTINC
:
3165 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3167 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3169 case OPC2_32_BO_LD_W_PREINC
:
3170 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3175 static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState
*env
,
3182 TCGv temp
, temp2
, temp3
;
3184 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3185 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3186 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3187 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3189 temp
= tcg_temp_new();
3190 temp2
= tcg_temp_new();
3191 temp3
= tcg_const_i32(off10
);
3193 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
3194 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3198 case OPC2_32_BO_LD_A_BR
:
3199 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3200 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3202 case OPC2_32_BO_LD_A_CIRC
:
3203 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3204 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3206 case OPC2_32_BO_LD_B_BR
:
3207 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
3208 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3210 case OPC2_32_BO_LD_B_CIRC
:
3211 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
3212 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3214 case OPC2_32_BO_LD_BU_BR
:
3215 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3216 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3218 case OPC2_32_BO_LD_BU_CIRC
:
3219 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3220 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3222 case OPC2_32_BO_LD_D_BR
:
3223 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
3224 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3226 case OPC2_32_BO_LD_D_CIRC
:
3227 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3228 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3229 tcg_gen_addi_tl(temp
, temp
, 4);
3230 tcg_gen_rem_tl(temp
, temp
, temp2
);
3231 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3232 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3233 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3235 case OPC2_32_BO_LD_DA_BR
:
3236 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
3237 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3239 case OPC2_32_BO_LD_DA_CIRC
:
3240 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3241 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3242 tcg_gen_addi_tl(temp
, temp
, 4);
3243 tcg_gen_rem_tl(temp
, temp
, temp2
);
3244 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3245 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3246 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3248 case OPC2_32_BO_LD_H_BR
:
3249 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
3250 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3252 case OPC2_32_BO_LD_H_CIRC
:
3253 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
3254 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3256 case OPC2_32_BO_LD_HU_BR
:
3257 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3258 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3260 case OPC2_32_BO_LD_HU_CIRC
:
3261 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3262 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3264 case OPC2_32_BO_LD_Q_BR
:
3265 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3266 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3267 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3269 case OPC2_32_BO_LD_Q_CIRC
:
3270 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3271 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3272 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3274 case OPC2_32_BO_LD_W_BR
:
3275 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3276 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3278 case OPC2_32_BO_LD_W_CIRC
:
3279 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3280 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3283 tcg_temp_free(temp
);
3284 tcg_temp_free(temp2
);
3285 tcg_temp_free(temp3
);
3288 static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState
*env
,
3297 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3298 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3299 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3300 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3303 temp
= tcg_temp_new();
3304 temp2
= tcg_temp_new();
3307 case OPC2_32_BO_LDLCX_SHORTOFF
:
3308 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3309 gen_helper_ldlcx(cpu_env
, temp
);
3311 case OPC2_32_BO_LDMST_SHORTOFF
:
3312 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3313 gen_ldmst(ctx
, r1
, temp
);
3315 case OPC2_32_BO_LDMST_POSTINC
:
3316 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
3317 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3319 case OPC2_32_BO_LDMST_PREINC
:
3320 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3321 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
3323 case OPC2_32_BO_LDUCX_SHORTOFF
:
3324 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3325 gen_helper_lducx(cpu_env
, temp
);
3327 case OPC2_32_BO_LEA_SHORTOFF
:
3328 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
3330 case OPC2_32_BO_STLCX_SHORTOFF
:
3331 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3332 gen_helper_stlcx(cpu_env
, temp
);
3334 case OPC2_32_BO_STUCX_SHORTOFF
:
3335 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3336 gen_helper_stucx(cpu_env
, temp
);
3338 case OPC2_32_BO_SWAP_W_SHORTOFF
:
3339 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3340 gen_swap(ctx
, r1
, temp
);
3342 case OPC2_32_BO_SWAP_W_POSTINC
:
3343 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
3344 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3346 case OPC2_32_BO_SWAP_W_PREINC
:
3347 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3348 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
3351 tcg_temp_free(temp
);
3352 tcg_temp_free(temp2
);
3355 static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState
*env
,
3362 TCGv temp
, temp2
, temp3
;
3364 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3365 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3366 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3367 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3369 temp
= tcg_temp_new();
3370 temp2
= tcg_temp_new();
3371 temp3
= tcg_const_i32(off10
);
3373 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
3374 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3377 case OPC2_32_BO_LDMST_BR
:
3378 gen_ldmst(ctx
, r1
, temp2
);
3379 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3381 case OPC2_32_BO_LDMST_CIRC
:
3382 gen_ldmst(ctx
, r1
, temp2
);
3383 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3385 case OPC2_32_BO_SWAP_W_BR
:
3386 gen_swap(ctx
, r1
, temp2
);
3387 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3389 case OPC2_32_BO_SWAP_W_CIRC
:
3390 gen_swap(ctx
, r1
, temp2
);
3391 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3394 tcg_temp_free(temp
);
3395 tcg_temp_free(temp2
);
3396 tcg_temp_free(temp3
);
3399 static void decode_bol_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int32_t op1
)
3405 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
3406 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
3407 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
3410 case OPC1_32_BOL_LD_A_LONGOFF
:
3411 temp
= tcg_temp_new();
3412 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
3413 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
3414 tcg_temp_free(temp
);
3416 case OPC1_32_BOL_LD_W_LONGOFF
:
3417 temp
= tcg_temp_new();
3418 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
3419 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
3420 tcg_temp_free(temp
);
3422 case OPC1_32_BOL_LEA_LONGOFF
:
3423 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
3425 case OPC1_32_BOL_ST_A_LONGOFF
:
3426 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3427 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
3429 /* raise illegal opcode trap */
3432 case OPC1_32_BOL_ST_W_LONGOFF
:
3433 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
3435 case OPC1_32_BOL_LD_B_LONGOFF
:
3436 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3437 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
3439 /* raise illegal opcode trap */
3442 case OPC1_32_BOL_LD_BU_LONGOFF
:
3443 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3444 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
3446 /* raise illegal opcode trap */
3449 case OPC1_32_BOL_LD_H_LONGOFF
:
3450 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3451 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
3453 /* raise illegal opcode trap */
3456 case OPC1_32_BOL_LD_HU_LONGOFF
:
3457 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3458 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
3460 /* raise illegal opcode trap */
3463 case OPC1_32_BOL_ST_B_LONGOFF
:
3464 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3465 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
3467 /* raise illegal opcode trap */
3470 case OPC1_32_BOL_ST_H_LONGOFF
:
3471 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3472 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
3474 /* raise illegal opcode trap */
3481 static void decode_rc_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
3488 r2
= MASK_OP_RC_D(ctx
->opcode
);
3489 r1
= MASK_OP_RC_S1(ctx
->opcode
);
3490 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3491 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
3493 temp
= tcg_temp_new();
3496 case OPC2_32_RC_AND
:
3497 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3499 case OPC2_32_RC_ANDN
:
3500 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
3502 case OPC2_32_RC_NAND
:
3503 tcg_gen_movi_tl(temp
, const9
);
3504 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
3506 case OPC2_32_RC_NOR
:
3507 tcg_gen_movi_tl(temp
, const9
);
3508 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
3511 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3513 case OPC2_32_RC_ORN
:
3514 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
3517 const9
= sextract32(const9
, 0, 6);
3518 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3520 case OPC2_32_RC_SH_H
:
3521 const9
= sextract32(const9
, 0, 5);
3522 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3524 case OPC2_32_RC_SHA
:
3525 const9
= sextract32(const9
, 0, 6);
3526 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3528 case OPC2_32_RC_SHA_H
:
3529 const9
= sextract32(const9
, 0, 5);
3530 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3532 case OPC2_32_RC_SHAS
:
3533 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3535 case OPC2_32_RC_XNOR
:
3536 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3537 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
3539 case OPC2_32_RC_XOR
:
3540 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3543 tcg_temp_free(temp
);
3546 static void decode_rc_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
3554 r2
= MASK_OP_RC_D(ctx
->opcode
);
3555 r1
= MASK_OP_RC_S1(ctx
->opcode
);
3556 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
3558 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
3560 temp
= tcg_temp_new();
3563 case OPC2_32_RC_ABSDIF
:
3564 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3566 case OPC2_32_RC_ABSDIFS
:
3567 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3569 case OPC2_32_RC_ADD
:
3570 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3572 case OPC2_32_RC_ADDC
:
3573 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3575 case OPC2_32_RC_ADDS
:
3576 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3578 case OPC2_32_RC_ADDS_U
:
3579 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3581 case OPC2_32_RC_ADDX
:
3582 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3584 case OPC2_32_RC_AND_EQ
:
3585 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3586 const9
, &tcg_gen_and_tl
);
3588 case OPC2_32_RC_AND_GE
:
3589 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3590 const9
, &tcg_gen_and_tl
);
3592 case OPC2_32_RC_AND_GE_U
:
3593 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3594 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3595 const9
, &tcg_gen_and_tl
);
3597 case OPC2_32_RC_AND_LT
:
3598 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3599 const9
, &tcg_gen_and_tl
);
3601 case OPC2_32_RC_AND_LT_U
:
3602 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3603 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3604 const9
, &tcg_gen_and_tl
);
3606 case OPC2_32_RC_AND_NE
:
3607 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3608 const9
, &tcg_gen_and_tl
);
3611 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3613 case OPC2_32_RC_EQANY_B
:
3614 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3616 case OPC2_32_RC_EQANY_H
:
3617 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3620 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3622 case OPC2_32_RC_GE_U
:
3623 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3624 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3627 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3629 case OPC2_32_RC_LT_U
:
3630 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3631 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3633 case OPC2_32_RC_MAX
:
3634 tcg_gen_movi_tl(temp
, const9
);
3635 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
3636 cpu_gpr_d
[r1
], temp
);
3638 case OPC2_32_RC_MAX_U
:
3639 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
3640 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
3641 cpu_gpr_d
[r1
], temp
);
3643 case OPC2_32_RC_MIN
:
3644 tcg_gen_movi_tl(temp
, const9
);
3645 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
3646 cpu_gpr_d
[r1
], temp
);
3648 case OPC2_32_RC_MIN_U
:
3649 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
3650 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
3651 cpu_gpr_d
[r1
], temp
);
3654 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3656 case OPC2_32_RC_OR_EQ
:
3657 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3658 const9
, &tcg_gen_or_tl
);
3660 case OPC2_32_RC_OR_GE
:
3661 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3662 const9
, &tcg_gen_or_tl
);
3664 case OPC2_32_RC_OR_GE_U
:
3665 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3666 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3667 const9
, &tcg_gen_or_tl
);
3669 case OPC2_32_RC_OR_LT
:
3670 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3671 const9
, &tcg_gen_or_tl
);
3673 case OPC2_32_RC_OR_LT_U
:
3674 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3675 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3676 const9
, &tcg_gen_or_tl
);
3678 case OPC2_32_RC_OR_NE
:
3679 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3680 const9
, &tcg_gen_or_tl
);
3682 case OPC2_32_RC_RSUB
:
3683 tcg_gen_movi_tl(temp
, const9
);
3684 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
3686 case OPC2_32_RC_RSUBS
:
3687 tcg_gen_movi_tl(temp
, const9
);
3688 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
3690 case OPC2_32_RC_RSUBS_U
:
3691 tcg_gen_movi_tl(temp
, const9
);
3692 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
3694 case OPC2_32_RC_SH_EQ
:
3695 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3697 case OPC2_32_RC_SH_GE
:
3698 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3700 case OPC2_32_RC_SH_GE_U
:
3701 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3702 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3704 case OPC2_32_RC_SH_LT
:
3705 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3707 case OPC2_32_RC_SH_LT_U
:
3708 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3709 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3711 case OPC2_32_RC_SH_NE
:
3712 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3714 case OPC2_32_RC_XOR_EQ
:
3715 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3716 const9
, &tcg_gen_xor_tl
);
3718 case OPC2_32_RC_XOR_GE
:
3719 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3720 const9
, &tcg_gen_xor_tl
);
3722 case OPC2_32_RC_XOR_GE_U
:
3723 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3724 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3725 const9
, &tcg_gen_xor_tl
);
3727 case OPC2_32_RC_XOR_LT
:
3728 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3729 const9
, &tcg_gen_xor_tl
);
3731 case OPC2_32_RC_XOR_LT_U
:
3732 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3733 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3734 const9
, &tcg_gen_xor_tl
);
3736 case OPC2_32_RC_XOR_NE
:
3737 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
3738 const9
, &tcg_gen_xor_tl
);
3741 tcg_temp_free(temp
);
3744 static void decode_rc_serviceroutine(CPUTriCoreState
*env
, DisasContext
*ctx
)
3749 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
3750 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3753 case OPC2_32_RC_BISR
:
3754 gen_helper_1arg(bisr
, const9
);
3756 case OPC2_32_RC_SYSCALL
:
3757 /* TODO: Add exception generation */
3762 static void decode_rc_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
3768 r2
= MASK_OP_RC_D(ctx
->opcode
);
3769 r1
= MASK_OP_RC_S1(ctx
->opcode
);
3770 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
3772 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
3775 case OPC2_32_RC_MUL_32
:
3776 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3778 case OPC2_32_RC_MUL_64
:
3779 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
3781 case OPC2_32_RC_MULS_32
:
3782 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3784 case OPC2_32_RC_MUL_U_64
:
3785 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3786 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
3788 case OPC2_32_RC_MULS_U_32
:
3789 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3790 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3796 static void decode_rcpw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
3800 int32_t pos
, width
, const4
;
3804 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
3805 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
3806 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
3807 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
3808 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
3809 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
3812 case OPC2_32_RCPW_IMASK
:
3813 /* if pos + width > 31 undefined result */
3814 if (pos
+ width
<= 31) {
3815 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
3816 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
3819 case OPC2_32_RCPW_INSERT
:
3820 /* if pos + width > 32 undefined result */
3821 if (pos
+ width
<= 32) {
3822 temp
= tcg_const_i32(const4
);
3823 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
3824 tcg_temp_free(temp
);
3832 static void decode_rcrw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
3836 int32_t width
, const4
;
3838 TCGv temp
, temp2
, temp3
;
3840 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
3841 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
3842 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
3843 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
3844 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
3845 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
3847 temp
= tcg_temp_new();
3848 temp2
= tcg_temp_new();
3851 case OPC2_32_RCRW_IMASK
:
3852 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r4
], 0x1f);
3853 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
3854 tcg_gen_shl_tl(cpu_gpr_d
[r3
+ 1], temp2
, temp
);
3855 tcg_gen_movi_tl(temp2
, const4
);
3856 tcg_gen_shl_tl(cpu_gpr_d
[r3
], temp2
, temp
);
3858 case OPC2_32_RCRW_INSERT
:
3859 temp3
= tcg_temp_new();
3861 tcg_gen_movi_tl(temp
, width
);
3862 tcg_gen_movi_tl(temp2
, const4
);
3863 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r4
], 0x1f);
3864 gen_insert(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
3866 tcg_temp_free(temp3
);
3869 tcg_temp_free(temp
);
3870 tcg_temp_free(temp2
);
3875 static void decode_rcr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
3883 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
3884 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
3885 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
3886 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
3887 r4
= MASK_OP_RCR_D(ctx
->opcode
);
3890 case OPC2_32_RCR_CADD
:
3891 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
3894 case OPC2_32_RCR_CADDN
:
3895 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
3898 case OPC2_32_RCR_SEL
:
3899 temp
= tcg_const_i32(0);
3900 temp2
= tcg_const_i32(const9
);
3901 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r4
], temp
,
3902 cpu_gpr_d
[r1
], temp2
);
3903 tcg_temp_free(temp
);
3904 tcg_temp_free(temp2
);
3906 case OPC2_32_RCR_SELN
:
3907 temp
= tcg_const_i32(0);
3908 temp2
= tcg_const_i32(const9
);
3909 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r4
], temp
,
3910 cpu_gpr_d
[r1
], temp2
);
3911 tcg_temp_free(temp
);
3912 tcg_temp_free(temp2
);
3917 static void decode_rcr_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
3924 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
3925 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
3926 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
3927 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
3928 r4
= MASK_OP_RCR_D(ctx
->opcode
);
3931 case OPC2_32_RCR_MADD_32
:
3932 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3934 case OPC2_32_RCR_MADD_64
:
3935 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3936 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3938 case OPC2_32_RCR_MADDS_32
:
3939 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3941 case OPC2_32_RCR_MADDS_64
:
3942 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3943 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3945 case OPC2_32_RCR_MADD_U_64
:
3946 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
3947 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3948 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3950 case OPC2_32_RCR_MADDS_U_32
:
3951 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
3952 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3954 case OPC2_32_RCR_MADDS_U_64
:
3955 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
3956 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3957 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3962 static void decode_rcr_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
3969 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
3970 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
3971 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
3972 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
3973 r4
= MASK_OP_RCR_D(ctx
->opcode
);
3976 case OPC2_32_RCR_MSUB_32
:
3977 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3979 case OPC2_32_RCR_MSUB_64
:
3980 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3981 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3983 case OPC2_32_RCR_MSUBS_32
:
3984 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3986 case OPC2_32_RCR_MSUBS_64
:
3987 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3988 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3990 case OPC2_32_RCR_MSUB_U_64
:
3991 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
3992 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
3993 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
3995 case OPC2_32_RCR_MSUBS_U_32
:
3996 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
3997 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
3999 case OPC2_32_RCR_MSUBS_U_64
:
4000 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4001 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4002 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4009 static void decode_rlc_opc(CPUTriCoreState
*env
, DisasContext
*ctx
,
4015 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
4016 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
4017 r2
= MASK_OP_RLC_D(ctx
->opcode
);
4020 case OPC1_32_RLC_ADDI
:
4021 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
4023 case OPC1_32_RLC_ADDIH
:
4024 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
4026 case OPC1_32_RLC_ADDIH_A
:
4027 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
4029 case OPC1_32_RLC_MFCR
:
4030 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
4031 gen_mfcr(env
, cpu_gpr_d
[r2
], const16
);
4033 case OPC1_32_RLC_MOV
:
4034 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
4036 case OPC1_32_RLC_MOV_64
:
4037 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4038 if ((r2
& 0x1) != 0) {
4039 /* TODO: raise OPD trap */
4041 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
4042 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
4044 /* TODO: raise illegal opcode trap */
4047 case OPC1_32_RLC_MOV_U
:
4048 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
4049 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
4051 case OPC1_32_RLC_MOV_H
:
4052 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
4054 case OPC1_32_RLC_MOVH_A
:
4055 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
4057 case OPC1_32_RLC_MTCR
:
4058 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
4059 gen_mtcr(env
, ctx
, cpu_gpr_d
[r1
], const16
);
4065 static void decode_rr_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
4070 r3
= MASK_OP_RR_D(ctx
->opcode
);
4071 r2
= MASK_OP_RR_S2(ctx
->opcode
);
4072 r1
= MASK_OP_RR_S1(ctx
->opcode
);
4073 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
4076 case OPC2_32_RR_ABS
:
4077 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
4079 case OPC2_32_RR_ABS_B
:
4080 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4082 case OPC2_32_RR_ABS_H
:
4083 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4085 case OPC2_32_RR_ABSDIF
:
4086 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4088 case OPC2_32_RR_ABSDIF_B
:
4089 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4092 case OPC2_32_RR_ABSDIF_H
:
4093 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4096 case OPC2_32_RR_ABSDIFS
:
4097 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4100 case OPC2_32_RR_ABSDIFS_H
:
4101 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4104 case OPC2_32_RR_ABSS
:
4105 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4107 case OPC2_32_RR_ABSS_H
:
4108 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4110 case OPC2_32_RR_ADD
:
4111 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4113 case OPC2_32_RR_ADD_B
:
4114 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4116 case OPC2_32_RR_ADD_H
:
4117 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4119 case OPC2_32_RR_ADDC
:
4120 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4122 case OPC2_32_RR_ADDS
:
4123 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4125 case OPC2_32_RR_ADDS_H
:
4126 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4129 case OPC2_32_RR_ADDS_HU
:
4130 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4133 case OPC2_32_RR_ADDS_U
:
4134 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4137 case OPC2_32_RR_ADDX
:
4138 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4140 case OPC2_32_RR_AND_EQ
:
4141 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4142 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4144 case OPC2_32_RR_AND_GE
:
4145 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4146 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4148 case OPC2_32_RR_AND_GE_U
:
4149 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4150 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4152 case OPC2_32_RR_AND_LT
:
4153 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4154 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4156 case OPC2_32_RR_AND_LT_U
:
4157 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4158 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4160 case OPC2_32_RR_AND_NE
:
4161 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4162 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4165 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4168 case OPC2_32_RR_EQ_B
:
4169 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4171 case OPC2_32_RR_EQ_H
:
4172 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4174 case OPC2_32_RR_EQ_W
:
4175 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4177 case OPC2_32_RR_EQANY_B
:
4178 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4180 case OPC2_32_RR_EQANY_H
:
4181 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4184 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4187 case OPC2_32_RR_GE_U
:
4188 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4192 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4195 case OPC2_32_RR_LT_U
:
4196 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4199 case OPC2_32_RR_LT_B
:
4200 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4202 case OPC2_32_RR_LT_BU
:
4203 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4205 case OPC2_32_RR_LT_H
:
4206 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4208 case OPC2_32_RR_LT_HU
:
4209 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4211 case OPC2_32_RR_LT_W
:
4212 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4214 case OPC2_32_RR_LT_WU
:
4215 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4217 case OPC2_32_RR_MAX
:
4218 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4219 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4221 case OPC2_32_RR_MAX_U
:
4222 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4223 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4225 case OPC2_32_RR_MAX_B
:
4226 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4228 case OPC2_32_RR_MAX_BU
:
4229 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4231 case OPC2_32_RR_MAX_H
:
4232 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4234 case OPC2_32_RR_MAX_HU
:
4235 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4237 case OPC2_32_RR_MIN
:
4238 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4239 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4241 case OPC2_32_RR_MIN_U
:
4242 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4243 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4245 case OPC2_32_RR_MIN_B
:
4246 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4248 case OPC2_32_RR_MIN_BU
:
4249 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4251 case OPC2_32_RR_MIN_H
:
4252 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4254 case OPC2_32_RR_MIN_HU
:
4255 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4257 case OPC2_32_RR_MOV
:
4258 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
4261 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4264 case OPC2_32_RR_OR_EQ
:
4265 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4266 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4268 case OPC2_32_RR_OR_GE
:
4269 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4270 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4272 case OPC2_32_RR_OR_GE_U
:
4273 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4274 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4276 case OPC2_32_RR_OR_LT
:
4277 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4278 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4280 case OPC2_32_RR_OR_LT_U
:
4281 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4282 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4284 case OPC2_32_RR_OR_NE
:
4285 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4286 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4288 case OPC2_32_RR_SAT_B
:
4289 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
4291 case OPC2_32_RR_SAT_BU
:
4292 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
4294 case OPC2_32_RR_SAT_H
:
4295 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
4297 case OPC2_32_RR_SAT_HU
:
4298 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
4300 case OPC2_32_RR_SH_EQ
:
4301 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4304 case OPC2_32_RR_SH_GE
:
4305 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4308 case OPC2_32_RR_SH_GE_U
:
4309 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4312 case OPC2_32_RR_SH_LT
:
4313 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4316 case OPC2_32_RR_SH_LT_U
:
4317 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4320 case OPC2_32_RR_SH_NE
:
4321 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4324 case OPC2_32_RR_SUB
:
4325 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4327 case OPC2_32_RR_SUB_B
:
4328 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4330 case OPC2_32_RR_SUB_H
:
4331 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4333 case OPC2_32_RR_SUBC
:
4334 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4336 case OPC2_32_RR_SUBS
:
4337 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4339 case OPC2_32_RR_SUBS_U
:
4340 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4342 case OPC2_32_RR_SUBS_H
:
4343 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4346 case OPC2_32_RR_SUBS_HU
:
4347 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4350 case OPC2_32_RR_SUBX
:
4351 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4353 case OPC2_32_RR_XOR_EQ
:
4354 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4355 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4357 case OPC2_32_RR_XOR_GE
:
4358 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4359 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4361 case OPC2_32_RR_XOR_GE_U
:
4362 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4363 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4365 case OPC2_32_RR_XOR_LT
:
4366 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4367 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4369 case OPC2_32_RR_XOR_LT_U
:
4370 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4371 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4373 case OPC2_32_RR_XOR_NE
:
4374 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4375 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4380 static void decode_rr_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
4386 r3
= MASK_OP_RR_D(ctx
->opcode
);
4387 r2
= MASK_OP_RR_S2(ctx
->opcode
);
4388 r1
= MASK_OP_RR_S1(ctx
->opcode
);
4390 temp
= tcg_temp_new();
4391 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
4394 case OPC2_32_RR_AND
:
4395 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4397 case OPC2_32_RR_ANDN
:
4398 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4400 case OPC2_32_RR_CLO
:
4401 gen_helper_clo(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4403 case OPC2_32_RR_CLO_H
:
4404 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4406 case OPC2_32_RR_CLS
:
4407 gen_helper_cls(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4409 case OPC2_32_RR_CLS_H
:
4410 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4412 case OPC2_32_RR_CLZ
:
4413 gen_helper_clz(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4415 case OPC2_32_RR_CLZ_H
:
4416 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4418 case OPC2_32_RR_NAND
:
4419 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4421 case OPC2_32_RR_NOR
:
4422 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4425 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4427 case OPC2_32_RR_ORN
:
4428 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4431 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4433 case OPC2_32_RR_SH_H
:
4434 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4436 case OPC2_32_RR_SHA
:
4437 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4439 case OPC2_32_RR_SHA_H
:
4440 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4442 case OPC2_32_RR_SHAS
:
4443 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4445 case OPC2_32_RR_XNOR
:
4446 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4448 case OPC2_32_RR_XOR
:
4449 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4452 tcg_temp_free(temp
);
4455 static void decode_rr_address(CPUTriCoreState
*env
, DisasContext
*ctx
)
4461 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
4462 r3
= MASK_OP_RR_D(ctx
->opcode
);
4463 r2
= MASK_OP_RR_S2(ctx
->opcode
);
4464 r1
= MASK_OP_RR_S1(ctx
->opcode
);
4465 n
= MASK_OP_RR_N(ctx
->opcode
);
4468 case OPC2_32_RR_ADD_A
:
4469 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
4471 case OPC2_32_RR_ADDSC_A
:
4472 temp
= tcg_temp_new();
4473 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
4474 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
4475 tcg_temp_free(temp
);
4477 case OPC2_32_RR_ADDSC_AT
:
4478 temp
= tcg_temp_new();
4479 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
4480 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
4481 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
4482 tcg_temp_free(temp
);
4484 case OPC2_32_RR_EQ_A
:
4485 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
4488 case OPC2_32_RR_EQZ
:
4489 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
4491 case OPC2_32_RR_GE_A
:
4492 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
4495 case OPC2_32_RR_LT_A
:
4496 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
4499 case OPC2_32_RR_MOV_A
:
4500 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
4502 case OPC2_32_RR_MOV_AA
:
4503 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
4505 case OPC2_32_RR_MOV_D
:
4506 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
4508 case OPC2_32_RR_NE_A
:
4509 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
4512 case OPC2_32_RR_NEZ_A
:
4513 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
4515 case OPC2_32_RR_SUB_A
:
4516 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
4521 static void decode_rr_idirect(CPUTriCoreState
*env
, DisasContext
*ctx
)
4526 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
4527 r1
= MASK_OP_RR_S1(ctx
->opcode
);
4531 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
4533 case OPC2_32_RR_JLI
:
4534 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
4535 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
4537 case OPC2_32_RR_CALLI
:
4538 gen_helper_1arg(call
, ctx
->next_pc
);
4539 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
4543 ctx
->bstate
= BS_BRANCH
;
4546 static void decode_rr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
4553 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
4554 r3
= MASK_OP_RR_D(ctx
->opcode
);
4555 r2
= MASK_OP_RR_S2(ctx
->opcode
);
4556 r1
= MASK_OP_RR_S1(ctx
->opcode
);
4559 case OPC2_32_RR_BMERGE
:
4560 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4562 case OPC2_32_RR_BSPLIT
:
4563 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
4565 case OPC2_32_RR_DVINIT_B
:
4566 gen_dvinit_b(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
4569 case OPC2_32_RR_DVINIT_BU
:
4570 temp
= tcg_temp_new();
4571 temp2
= tcg_temp_new();
4573 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
4574 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
4575 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
4576 tcg_gen_neg_tl(temp
, cpu_gpr_d
[r3
+1]);
4577 /* use cpu_PSW_AV to compare against 0 */
4578 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, cpu_gpr_d
[r3
+1], cpu_PSW_AV
,
4579 temp
, cpu_gpr_d
[r3
+1]);
4580 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
4581 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
4582 temp2
, cpu_gpr_d
[r2
]);
4583 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
4585 /* overflow = (D[b] == 0) */
4586 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
4588 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
4590 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
4592 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 8);
4593 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
4594 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp
);
4596 tcg_temp_free(temp
);
4597 tcg_temp_free(temp2
);
4599 case OPC2_32_RR_DVINIT_H
:
4600 gen_dvinit_h(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
4603 case OPC2_32_RR_DVINIT_HU
:
4604 temp
= tcg_temp_new();
4605 temp2
= tcg_temp_new();
4607 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
4608 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
4609 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
4610 tcg_gen_neg_tl(temp
, cpu_gpr_d
[r3
+1]);
4611 /* use cpu_PSW_AV to compare against 0 */
4612 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, cpu_gpr_d
[r3
+1], cpu_PSW_AV
,
4613 temp
, cpu_gpr_d
[r3
+1]);
4614 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
4615 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
4616 temp2
, cpu_gpr_d
[r2
]);
4617 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
4619 /* overflow = (D[b] == 0) */
4620 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
4622 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
4624 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
4626 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
4627 tcg_gen_shri_tl(cpu_gpr_d
[r3
+1], temp
, 16);
4628 tcg_gen_shli_tl(cpu_gpr_d
[r3
], temp
, 16);
4629 tcg_temp_free(temp
);
4630 tcg_temp_free(temp2
);
4632 case OPC2_32_RR_DVINIT
:
4633 temp
= tcg_temp_new();
4634 temp2
= tcg_temp_new();
4635 /* overflow = ((D[b] == 0) ||
4636 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
4637 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
4638 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
4639 tcg_gen_and_tl(temp
, temp
, temp2
);
4640 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
4641 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
4642 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
4644 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
4646 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
4648 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4649 /* sign extend to high reg */
4650 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
4651 tcg_temp_free(temp
);
4652 tcg_temp_free(temp2
);
4654 case OPC2_32_RR_DVINIT_U
:
4655 /* overflow = (D[b] == 0) */
4656 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
4657 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
4659 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
4661 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
4663 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4664 /* zero extend to high reg*/
4665 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
4667 case OPC2_32_RR_PARITY
:
4668 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4670 case OPC2_32_RR_UNPACK
:
4671 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
4677 static void decode_rr1_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
4685 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
4686 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
4687 r3
= MASK_OP_RR1_D(ctx
->opcode
);
4688 n
= tcg_const_i32(MASK_OP_RR1_N(ctx
->opcode
));
4689 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
4692 case OPC2_32_RR1_MUL_H_32_LL
:
4693 temp64
= tcg_temp_new_i64();
4694 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4695 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
4696 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
4697 tcg_temp_free_i64(temp64
);
4699 case OPC2_32_RR1_MUL_H_32_LU
:
4700 temp64
= tcg_temp_new_i64();
4701 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4702 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
4703 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
4704 tcg_temp_free_i64(temp64
);
4706 case OPC2_32_RR1_MUL_H_32_UL
:
4707 temp64
= tcg_temp_new_i64();
4708 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4709 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
4710 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
4711 tcg_temp_free_i64(temp64
);
4713 case OPC2_32_RR1_MUL_H_32_UU
:
4714 temp64
= tcg_temp_new_i64();
4715 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4716 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
4717 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
4718 tcg_temp_free_i64(temp64
);
4720 case OPC2_32_RR1_MULM_H_64_LL
:
4721 temp64
= tcg_temp_new_i64();
4722 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4723 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
4725 tcg_gen_movi_tl(cpu_PSW_V
, 0);
4727 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
4728 tcg_temp_free_i64(temp64
);
4730 case OPC2_32_RR1_MULM_H_64_LU
:
4731 temp64
= tcg_temp_new_i64();
4732 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4733 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
4735 tcg_gen_movi_tl(cpu_PSW_V
, 0);
4737 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
4738 tcg_temp_free_i64(temp64
);
4740 case OPC2_32_RR1_MULM_H_64_UL
:
4741 temp64
= tcg_temp_new_i64();
4742 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4743 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
4745 tcg_gen_movi_tl(cpu_PSW_V
, 0);
4747 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
4748 tcg_temp_free_i64(temp64
);
4750 case OPC2_32_RR1_MULM_H_64_UU
:
4751 temp64
= tcg_temp_new_i64();
4752 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4753 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
4755 tcg_gen_movi_tl(cpu_PSW_V
, 0);
4757 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
4758 tcg_temp_free_i64(temp64
);
4761 case OPC2_32_RR1_MULR_H_16_LL
:
4762 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4763 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
4765 case OPC2_32_RR1_MULR_H_16_LU
:
4766 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4767 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
4769 case OPC2_32_RR1_MULR_H_16_UL
:
4770 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4771 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
4773 case OPC2_32_RR1_MULR_H_16_UU
:
4774 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
4775 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
4781 static void decode_32Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
4785 int32_t address
, const16
;
4788 TCGv temp
, temp2
, temp3
;
4790 op1
= MASK_OP_MAJOR(ctx
->opcode
);
4792 /* handle JNZ.T opcode only being 7 bit long */
4793 if (unlikely((op1
& 0x7f) == OPCM_32_BRN_JTT
)) {
4794 op1
= OPCM_32_BRN_JTT
;
4799 case OPCM_32_ABS_LDW
:
4800 decode_abs_ldw(env
, ctx
);
4802 case OPCM_32_ABS_LDB
:
4803 decode_abs_ldb(env
, ctx
);
4805 case OPCM_32_ABS_LDMST_SWAP
:
4806 decode_abs_ldst_swap(env
, ctx
);
4808 case OPCM_32_ABS_LDST_CONTEXT
:
4809 decode_abs_ldst_context(env
, ctx
);
4811 case OPCM_32_ABS_STORE
:
4812 decode_abs_store(env
, ctx
);
4814 case OPCM_32_ABS_STOREB_H
:
4815 decode_abs_storeb_h(env
, ctx
);
4817 case OPC1_32_ABS_STOREQ
:
4818 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4819 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4820 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4821 temp2
= tcg_temp_new();
4823 tcg_gen_shri_tl(temp2
, cpu_gpr_d
[r1
], 16);
4824 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_LEUW
);
4826 tcg_temp_free(temp2
);
4827 tcg_temp_free(temp
);
4829 case OPC1_32_ABS_LD_Q
:
4830 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4831 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4832 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4834 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
4835 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4837 tcg_temp_free(temp
);
4839 case OPC1_32_ABS_LEA
:
4840 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4841 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4842 tcg_gen_movi_tl(cpu_gpr_a
[r1
], EA_ABS_FORMAT(address
));
4845 case OPC1_32_ABSB_ST_T
:
4846 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4847 b
= MASK_OP_ABSB_B(ctx
->opcode
);
4848 bpos
= MASK_OP_ABSB_BPOS(ctx
->opcode
);
4850 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4851 temp2
= tcg_temp_new();
4853 tcg_gen_qemu_ld_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
4854 tcg_gen_andi_tl(temp2
, temp2
, ~(0x1u
<< bpos
));
4855 tcg_gen_ori_tl(temp2
, temp2
, (b
<< bpos
));
4856 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
4858 tcg_temp_free(temp
);
4859 tcg_temp_free(temp2
);
4862 case OPC1_32_B_CALL
:
4863 case OPC1_32_B_CALLA
:
4868 address
= MASK_OP_B_DISP24_SEXT(ctx
->opcode
);
4869 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
4872 case OPCM_32_BIT_ANDACC
:
4873 decode_bit_andacc(env
, ctx
);
4875 case OPCM_32_BIT_LOGICAL_T1
:
4876 decode_bit_logical_t(env
, ctx
);
4878 case OPCM_32_BIT_INSERT
:
4879 decode_bit_insert(env
, ctx
);
4881 case OPCM_32_BIT_LOGICAL_T2
:
4882 decode_bit_logical_t2(env
, ctx
);
4884 case OPCM_32_BIT_ORAND
:
4885 decode_bit_orand(env
, ctx
);
4887 case OPCM_32_BIT_SH_LOGIC1
:
4888 decode_bit_sh_logic1(env
, ctx
);
4890 case OPCM_32_BIT_SH_LOGIC2
:
4891 decode_bit_sh_logic2(env
, ctx
);
4894 case OPCM_32_BO_ADDRMODE_POST_PRE_BASE
:
4895 decode_bo_addrmode_post_pre_base(env
, ctx
);
4897 case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR
:
4898 decode_bo_addrmode_bitreverse_circular(env
, ctx
);
4900 case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE
:
4901 decode_bo_addrmode_ld_post_pre_base(env
, ctx
);
4903 case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR
:
4904 decode_bo_addrmode_ld_bitreverse_circular(env
, ctx
);
4906 case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE
:
4907 decode_bo_addrmode_stctx_post_pre_base(env
, ctx
);
4909 case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR
:
4910 decode_bo_addrmode_ldmst_bitreverse_circular(env
, ctx
);
4913 case OPC1_32_BOL_LD_A_LONGOFF
:
4914 case OPC1_32_BOL_LD_W_LONGOFF
:
4915 case OPC1_32_BOL_LEA_LONGOFF
:
4916 case OPC1_32_BOL_ST_W_LONGOFF
:
4917 case OPC1_32_BOL_ST_A_LONGOFF
:
4918 case OPC1_32_BOL_LD_B_LONGOFF
:
4919 case OPC1_32_BOL_LD_BU_LONGOFF
:
4920 case OPC1_32_BOL_LD_H_LONGOFF
:
4921 case OPC1_32_BOL_LD_HU_LONGOFF
:
4922 case OPC1_32_BOL_ST_B_LONGOFF
:
4923 case OPC1_32_BOL_ST_H_LONGOFF
:
4924 decode_bol_opc(env
, ctx
, op1
);
4927 case OPCM_32_BRC_EQ_NEQ
:
4928 case OPCM_32_BRC_GE
:
4929 case OPCM_32_BRC_JLT
:
4930 case OPCM_32_BRC_JNE
:
4931 const4
= MASK_OP_BRC_CONST4_SEXT(ctx
->opcode
);
4932 address
= MASK_OP_BRC_DISP15_SEXT(ctx
->opcode
);
4933 r1
= MASK_OP_BRC_S1(ctx
->opcode
);
4934 gen_compute_branch(ctx
, op1
, r1
, 0, const4
, address
);
4937 case OPCM_32_BRN_JTT
:
4938 address
= MASK_OP_BRN_DISP15_SEXT(ctx
->opcode
);
4939 r1
= MASK_OP_BRN_S1(ctx
->opcode
);
4940 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
4943 case OPCM_32_BRR_EQ_NEQ
:
4944 case OPCM_32_BRR_ADDR_EQ_NEQ
:
4945 case OPCM_32_BRR_GE
:
4946 case OPCM_32_BRR_JLT
:
4947 case OPCM_32_BRR_JNE
:
4948 case OPCM_32_BRR_JNZ
:
4949 case OPCM_32_BRR_LOOP
:
4950 address
= MASK_OP_BRR_DISP15_SEXT(ctx
->opcode
);
4951 r2
= MASK_OP_BRR_S2(ctx
->opcode
);
4952 r1
= MASK_OP_BRR_S1(ctx
->opcode
);
4953 gen_compute_branch(ctx
, op1
, r1
, r2
, 0, address
);
4956 case OPCM_32_RC_LOGICAL_SHIFT
:
4957 decode_rc_logical_shift(env
, ctx
);
4959 case OPCM_32_RC_ACCUMULATOR
:
4960 decode_rc_accumulator(env
, ctx
);
4962 case OPCM_32_RC_SERVICEROUTINE
:
4963 decode_rc_serviceroutine(env
, ctx
);
4965 case OPCM_32_RC_MUL
:
4966 decode_rc_mul(env
, ctx
);
4969 case OPCM_32_RCPW_MASK_INSERT
:
4970 decode_rcpw_insert(env
, ctx
);
4973 case OPC1_32_RCRR_INSERT
:
4974 r1
= MASK_OP_RCRR_S1(ctx
->opcode
);
4975 r2
= MASK_OP_RCRR_S3(ctx
->opcode
);
4976 r3
= MASK_OP_RCRR_D(ctx
->opcode
);
4977 const16
= MASK_OP_RCRR_CONST4(ctx
->opcode
);
4978 temp
= tcg_const_i32(const16
);
4979 temp2
= tcg_temp_new(); /* width*/
4980 temp3
= tcg_temp_new(); /* pos */
4982 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
+1], 0x1f);
4983 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
4985 gen_insert(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, temp2
, temp3
);
4987 tcg_temp_free(temp
);
4988 tcg_temp_free(temp2
);
4989 tcg_temp_free(temp3
);
4992 case OPCM_32_RCRW_MASK_INSERT
:
4993 decode_rcrw_insert(env
, ctx
);
4996 case OPCM_32_RCR_COND_SELECT
:
4997 decode_rcr_cond_select(env
, ctx
);
4999 case OPCM_32_RCR_MADD
:
5000 decode_rcr_madd(env
, ctx
);
5002 case OPCM_32_RCR_MSUB
:
5003 decode_rcr_msub(env
, ctx
);
5006 case OPC1_32_RLC_ADDI
:
5007 case OPC1_32_RLC_ADDIH
:
5008 case OPC1_32_RLC_ADDIH_A
:
5009 case OPC1_32_RLC_MFCR
:
5010 case OPC1_32_RLC_MOV
:
5011 case OPC1_32_RLC_MOV_64
:
5012 case OPC1_32_RLC_MOV_U
:
5013 case OPC1_32_RLC_MOV_H
:
5014 case OPC1_32_RLC_MOVH_A
:
5015 case OPC1_32_RLC_MTCR
:
5016 decode_rlc_opc(env
, ctx
, op1
);
5019 case OPCM_32_RR_ACCUMULATOR
:
5020 decode_rr_accumulator(env
, ctx
);
5022 case OPCM_32_RR_LOGICAL_SHIFT
:
5023 decode_rr_logical_shift(env
, ctx
);
5025 case OPCM_32_RR_ADRESS
:
5026 decode_rr_address(env
, ctx
);
5028 case OPCM_32_RR_IDIRECT
:
5029 decode_rr_idirect(env
, ctx
);
5031 case OPCM_32_RR_DIVIDE
:
5032 decode_rr_divide(env
, ctx
);
5035 case OPCM_32_RR1_MUL
:
5036 decode_rr1_mul(env
, ctx
);
5041 static void decode_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int *is_branch
)
5043 /* 16-Bit Instruction */
5044 if ((ctx
->opcode
& 0x1) == 0) {
5045 ctx
->next_pc
= ctx
->pc
+ 2;
5046 decode_16Bit_opc(env
, ctx
);
5047 /* 32-Bit Instruction */
5049 ctx
->next_pc
= ctx
->pc
+ 4;
5050 decode_32Bit_opc(env
, ctx
);
5055 gen_intermediate_code_internal(TriCoreCPU
*cpu
, struct TranslationBlock
*tb
,
5058 CPUState
*cs
= CPU(cpu
);
5059 CPUTriCoreState
*env
= &cpu
->env
;
5061 target_ulong pc_start
;
5063 uint16_t *gen_opc_end
;
5066 qemu_log("search pc %d\n", search_pc
);
5071 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
5075 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
5076 ctx
.bstate
= BS_NONE
;
5077 ctx
.mem_idx
= cpu_mmu_index(env
);
5079 tcg_clear_temp_count();
5081 while (ctx
.bstate
== BS_NONE
) {
5082 ctx
.opcode
= cpu_ldl_code(env
, ctx
.pc
);
5083 decode_opc(env
, &ctx
, 0);
5087 if (tcg_ctx
.gen_opc_ptr
>= gen_opc_end
) {
5088 gen_save_pc(ctx
.next_pc
);
5093 gen_save_pc(ctx
.next_pc
);
5097 ctx
.pc
= ctx
.next_pc
;
5100 gen_tb_end(tb
, num_insns
);
5101 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
5103 printf("done_generating search pc\n");
5105 tb
->size
= ctx
.pc
- pc_start
;
5106 tb
->icount
= num_insns
;
5108 if (tcg_check_temp_count()) {
5109 printf("LEAK at %08x\n", env
->PC
);
5113 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
5114 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5115 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
5122 gen_intermediate_code(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
5124 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, false);
5128 gen_intermediate_code_pc(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
5130 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, true);
5134 restore_state_to_opc(CPUTriCoreState
*env
, TranslationBlock
*tb
, int pc_pos
)
5136 env
->PC
= tcg_ctx
.gen_opc_pc
[pc_pos
];
5144 void cpu_state_reset(CPUTriCoreState
*env
)
5146 /* Reset Regs to Default Value */
5150 static void tricore_tcg_init_csfr(void)
5152 cpu_PCXI
= tcg_global_mem_new(TCG_AREG0
,
5153 offsetof(CPUTriCoreState
, PCXI
), "PCXI");
5154 cpu_PSW
= tcg_global_mem_new(TCG_AREG0
,
5155 offsetof(CPUTriCoreState
, PSW
), "PSW");
5156 cpu_PC
= tcg_global_mem_new(TCG_AREG0
,
5157 offsetof(CPUTriCoreState
, PC
), "PC");
5158 cpu_ICR
= tcg_global_mem_new(TCG_AREG0
,
5159 offsetof(CPUTriCoreState
, ICR
), "ICR");
5162 void tricore_tcg_init(void)
5169 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
5171 for (i
= 0 ; i
< 16 ; i
++) {
5172 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
5173 offsetof(CPUTriCoreState
, gpr_a
[i
]),
5176 for (i
= 0 ; i
< 16 ; i
++) {
5177 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
5178 offsetof(CPUTriCoreState
, gpr_d
[i
]),
5181 tricore_tcg_init_csfr();
5182 /* init PSW flag cache */
5183 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
5184 offsetof(CPUTriCoreState
, PSW_USB_C
),
5186 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
5187 offsetof(CPUTriCoreState
, PSW_USB_V
),
5189 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
5190 offsetof(CPUTriCoreState
, PSW_USB_SV
),
5192 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
5193 offsetof(CPUTriCoreState
, PSW_USB_AV
),
5195 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
5196 offsetof(CPUTriCoreState
, PSW_USB_SAV
),