2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
/* TCG globals holding the guest CPU state referenced by generated code. */
static TCGv cpu_gpr_a[16];   /* address registers a0..a15 (a10 = sp) */
static TCGv cpu_gpr_d[16];   /* data registers d0..d15 */
/* Cached PSW status flags, kept as separate TCG globals:
   C = carry, V = overflow, SV = sticky overflow,
   AV = advanced overflow, SAV = sticky advanced overflow. */
static TCGv cpu_PSW_C;
static TCGv cpu_PSW_V;
static TCGv cpu_PSW_SV;
static TCGv cpu_PSW_AV;
static TCGv cpu_PSW_SAV;
/* Pointer to the CPUTriCoreState structure */
static TCGv_ptr cpu_env;
50 #include "exec/gen-icount.h"
/* Printable names of the address registers (a10 is the stack pointer).
   The extracted text dropped the closing brace of this initializer;
   restored here. */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };
/* Printable names of the data registers. */
static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
64 typedef struct DisasContext
{
65 struct TranslationBlock
*tb
;
66 target_ulong pc
, saved_pc
, next_pc
;
68 int singlestep_enabled
;
69 /* Routine used to access memory */
71 uint32_t hflags
, saved_hflags
;
90 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
91 fprintf_function cpu_fprintf
, int flags
)
93 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
94 CPUTriCoreState
*env
= &cpu
->env
;
100 cpu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
101 cpu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
102 cpu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
103 cpu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
104 cpu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
105 cpu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
107 for (i
= 0; i
< 16; ++i
) {
109 cpu_fprintf(f
, "\nGPR A%02d:", i
);
111 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
113 for (i
= 0; i
< 16; ++i
) {
115 cpu_fprintf(f
, "\nGPR D%02d:", i
);
117 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
119 cpu_fprintf(f
, "\n");
/*
 * Functions to generate micro-ops
 */

/* Makros for generating helpers */

/* Call a cpu_env helper taking one immediate argument: materialise the
   constant into a temporary, call, then free it.  The extraction dropped
   the '} while (0)' terminator; restored. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)
/* Split both operands into 16-bit halves and call the helper with the
   low (sign-extended) half of arg1 used for both positions (LL mode).
   Missing '} while (0)' restored. */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* LU mode: arg0 low half paired with arg1 upper/lower halves.
   Missing '} while (0)' restored. */
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* UL mode: note the swapped ext/sari order for arg1 relative to LU.
   Missing '} while (0)' restored. */
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* UU mode: arg0's halves swapped (sari into arg01, ext into arg00) and
   arg1's upper half used twice.  Missing '} while (0)' restored. */
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* Pack the 32-bit pair (al1,ah1) into a 64-bit value, call the helper,
   and unpack the 64-bit result into (rl,rh).  Missing '} while (0)'
   restored. */
#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {    \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
    TCGv_i64 arg1 = tcg_temp_new_i64();                      \
                                                             \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                  \
    gen_helper_##name(ret, arg1, arg2);                      \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
                                                             \
    tcg_temp_free_i64(ret);                                  \
    tcg_temp_free_i64(arg1);                                 \
} while (0)
/* Effective address of an ABS-format instruction: constant bits [17:14]
   become address bits [31:28]; the low 14 bits are used directly. */
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
/* Effective address for an absolute branch.  NOTE(review): this macro
   expands the surrounding variable 'offset', not its parameter 'con' --
   callers must have 'offset' in scope; verify against call sites. */
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
208 /* Functions for load/save to/from memory */
210 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
211 int16_t con
, TCGMemOp mop
)
213 TCGv temp
= tcg_temp_new();
214 tcg_gen_addi_tl(temp
, r2
, con
);
215 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
219 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
220 int16_t con
, TCGMemOp mop
)
222 TCGv temp
= tcg_temp_new();
223 tcg_gen_addi_tl(temp
, r2
, con
);
224 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
228 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
230 TCGv_i64 temp
= tcg_temp_new_i64();
232 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
233 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
235 tcg_temp_free_i64(temp
);
238 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
241 TCGv temp
= tcg_temp_new();
242 tcg_gen_addi_tl(temp
, base
, con
);
243 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
247 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
249 TCGv_i64 temp
= tcg_temp_new_i64();
251 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
252 /* write back to two 32 bit regs */
253 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
255 tcg_temp_free_i64(temp
);
258 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
261 TCGv temp
= tcg_temp_new();
262 tcg_gen_addi_tl(temp
, base
, con
);
263 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
267 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
270 TCGv temp
= tcg_temp_new();
271 tcg_gen_addi_tl(temp
, r2
, off
);
272 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
273 tcg_gen_mov_tl(r2
, temp
);
277 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
280 TCGv temp
= tcg_temp_new();
281 tcg_gen_addi_tl(temp
, r2
, off
);
282 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
283 tcg_gen_mov_tl(r2
, temp
);
287 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
288 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
290 TCGv temp
= tcg_temp_new();
291 TCGv temp2
= tcg_temp_new();
293 /* temp = (M(EA, word) */
294 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
295 /* temp = temp & ~E[a][63:32]) */
296 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
297 /* temp2 = (E[a][31:0] & E[a][63:32]); */
298 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
299 /* temp = temp | temp2; */
300 tcg_gen_or_tl(temp
, temp
, temp2
);
301 /* M(EA, word) = temp; */
302 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
305 tcg_temp_free(temp2
);
308 /* tmp = M(EA, word);
311 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
313 TCGv temp
= tcg_temp_new();
315 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
316 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
317 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
/* We generate loads and store to core special function register (csfr) through
   the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
   makros R, A and E, which allow read-only, all and endinit protected access.
   These makros also specify in which ISA version the csfr was introduced. */
/* NOTE(review): the extraction dropped several lines of this region
   (the 'case ADDRESS:' label inside R, the macro continuation/closing
   lines, and the switch body of gen_mfcr that expands a csfr table);
   restore them from upstream before building. */
#define R(ADDRESS, REG, FEATURE) \
        if (tricore_feature(env, FEATURE)) { \
            tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
/* Move-from-core-register: read csfr at 'offset' into 'ret'. */
static inline void gen_mfcr(CPUTriCoreState *env, TCGv ret, int32_t offset)
    /* since we're caching PSW make this a special case */
    if (offset == 0xfe04) {
        gen_helper_psw_read(ret, cpu_env);
/* NOTE(review): extraction dropped lines in this region as well
   (macro continuations, the switch scaffolding and the trap call);
   restore from upstream before building. */
#define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
                                    since no execption occurs */
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
        if (tricore_feature(env, FEATURE)) { \
            tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
/* Endinit protected registers
   TODO: Since the endinit bit is in a register of a not yet implemented
   watchdog device, we handle endinit protected registers like
   all-access registers for now. */
#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
/* Move-to-core-register: write r1 to csfr at 'offset'; only permitted
   in supervisor mode (KUU hflags == SM), otherwise a privilege trap is
   generated. */
static inline void gen_mtcr(CPUTriCoreState *env, DisasContext *ctx, TCGv r1,
    if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
        /* since we're caching PSW make this a special case */
        if (offset == 0xfe04) {
            gen_helper_psw_write(cpu_env, r1);
    /* generate privilege trap */
379 /* Functions for arithmetic instructions */
381 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
383 TCGv t0
= tcg_temp_new_i32();
384 TCGv result
= tcg_temp_new_i32();
385 /* Addition and set V/SV bits */
386 tcg_gen_add_tl(result
, r1
, r2
);
388 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
389 tcg_gen_xor_tl(t0
, r1
, r2
);
390 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
392 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
393 /* Calc AV/SAV bits */
394 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
395 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
397 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
398 /* write back result */
399 tcg_gen_mov_tl(ret
, result
);
401 tcg_temp_free(result
);
406 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
408 TCGv temp
= tcg_temp_new();
409 TCGv_i64 t0
= tcg_temp_new_i64();
410 TCGv_i64 t1
= tcg_temp_new_i64();
411 TCGv_i64 result
= tcg_temp_new_i64();
413 tcg_gen_add_i64(result
, r1
, r2
);
415 tcg_gen_xor_i64(t1
, result
, r1
);
416 tcg_gen_xor_i64(t0
, r1
, r2
);
417 tcg_gen_andc_i64(t1
, t1
, t0
);
418 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t1
, 32);
420 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
421 /* calc AV/SAV bits */
422 tcg_gen_trunc_shr_i64_i32(temp
, result
, 32);
423 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
424 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
426 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
427 /* write back result */
428 tcg_gen_mov_i64(ret
, result
);
431 tcg_temp_free_i64(result
);
432 tcg_temp_free_i64(t0
);
433 tcg_temp_free_i64(t1
);
437 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
438 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
439 void(*op2
)(TCGv
, TCGv
, TCGv
))
441 TCGv temp
= tcg_temp_new();
442 TCGv temp2
= tcg_temp_new();
443 TCGv temp3
= tcg_temp_new();
444 TCGv temp4
= tcg_temp_new();
446 (*op1
)(temp
, r1_low
, r2
);
448 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
449 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
450 if (op1
== tcg_gen_add_tl
) {
451 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
453 tcg_gen_and_tl(temp2
, temp2
, temp3
);
456 (*op2
)(temp3
, r1_high
, r3
);
458 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
459 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
460 if (op2
== tcg_gen_add_tl
) {
461 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
463 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
465 /* combine V0/V1 bits */
466 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
468 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
470 tcg_gen_mov_tl(ret_low
, temp
);
471 tcg_gen_mov_tl(ret_high
, temp3
);
473 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
474 tcg_gen_xor_tl(temp
, temp
, ret_low
);
475 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
476 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
477 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
479 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
482 tcg_temp_free(temp2
);
483 tcg_temp_free(temp3
);
484 tcg_temp_free(temp4
);
487 /* ret = r2 + (r1 * r3); */
488 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
490 TCGv_i64 t1
= tcg_temp_new_i64();
491 TCGv_i64 t2
= tcg_temp_new_i64();
492 TCGv_i64 t3
= tcg_temp_new_i64();
494 tcg_gen_ext_i32_i64(t1
, r1
);
495 tcg_gen_ext_i32_i64(t2
, r2
);
496 tcg_gen_ext_i32_i64(t3
, r3
);
498 tcg_gen_mul_i64(t1
, t1
, t3
);
499 tcg_gen_add_i64(t1
, t2
, t1
);
501 tcg_gen_trunc_i64_i32(ret
, t1
);
504 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
505 /* t1 < -0x80000000 */
506 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
507 tcg_gen_or_i64(t2
, t2
, t3
);
508 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
509 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
511 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
512 /* Calc AV/SAV bits */
513 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
514 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
516 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
518 tcg_temp_free_i64(t1
);
519 tcg_temp_free_i64(t2
);
520 tcg_temp_free_i64(t3
);
523 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
525 TCGv temp
= tcg_const_i32(con
);
526 gen_madd32_d(ret
, r1
, r2
, temp
);
531 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
534 TCGv t1
= tcg_temp_new();
535 TCGv t2
= tcg_temp_new();
536 TCGv t3
= tcg_temp_new();
537 TCGv t4
= tcg_temp_new();
539 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
540 /* only the add can overflow */
541 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
543 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
544 tcg_gen_xor_tl(t1
, r2_high
, t2
);
545 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
547 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
548 /* Calc AV/SAV bits */
549 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
550 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
552 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
553 /* write back the result */
554 tcg_gen_mov_tl(ret_low
, t3
);
555 tcg_gen_mov_tl(ret_high
, t4
);
564 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
567 TCGv_i64 t1
= tcg_temp_new_i64();
568 TCGv_i64 t2
= tcg_temp_new_i64();
569 TCGv_i64 t3
= tcg_temp_new_i64();
571 tcg_gen_extu_i32_i64(t1
, r1
);
572 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
573 tcg_gen_extu_i32_i64(t3
, r3
);
575 tcg_gen_mul_i64(t1
, t1
, t3
);
576 tcg_gen_add_i64(t2
, t2
, t1
);
577 /* write back result */
578 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
579 /* only the add overflows, if t2 < t1
581 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
582 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
583 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
585 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
586 /* Calc AV/SAV bits */
587 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
588 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
590 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
592 tcg_temp_free_i64(t1
);
593 tcg_temp_free_i64(t2
);
594 tcg_temp_free_i64(t3
);
598 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
601 TCGv temp
= tcg_const_i32(con
);
602 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
607 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
610 TCGv temp
= tcg_const_i32(con
);
611 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
616 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
617 TCGv r3
, uint32_t n
, uint32_t mode
)
619 TCGv temp
= tcg_const_i32(n
);
620 TCGv temp2
= tcg_temp_new();
621 TCGv_i64 temp64
= tcg_temp_new_i64();
624 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
627 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
630 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
633 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
636 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
637 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
638 tcg_gen_add_tl
, tcg_gen_add_tl
);
640 tcg_temp_free(temp2
);
641 tcg_temp_free_i64(temp64
);
/* Forward declaration, needed by gen_madds_h below; presumably the
   saturating add variant -- confirm against the later definition. */
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);
647 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
648 TCGv r3
, uint32_t n
, uint32_t mode
)
650 TCGv temp
= tcg_const_i32(n
);
651 TCGv temp2
= tcg_temp_new();
652 TCGv temp3
= tcg_temp_new();
653 TCGv_i64 temp64
= tcg_temp_new_i64();
657 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
660 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
663 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
666 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
669 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
670 gen_adds(ret_low
, r1_low
, temp
);
671 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
672 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
673 gen_adds(ret_high
, r1_high
, temp2
);
675 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
676 /* combine av bits */
677 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
680 tcg_temp_free(temp2
);
681 tcg_temp_free(temp3
);
682 tcg_temp_free_i64(temp64
);
687 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
688 TCGv r3
, uint32_t n
, uint32_t mode
)
690 TCGv temp
= tcg_const_i32(n
);
691 TCGv_i64 temp64
= tcg_temp_new_i64();
692 TCGv_i64 temp64_2
= tcg_temp_new_i64();
693 TCGv_i64 temp64_3
= tcg_temp_new_i64();
696 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
699 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
702 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
705 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
708 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
709 gen_add64_d(temp64_3
, temp64_2
, temp64
);
710 /* write back result */
711 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
714 tcg_temp_free_i64(temp64
);
715 tcg_temp_free_i64(temp64_2
);
716 tcg_temp_free_i64(temp64_3
);
720 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
721 TCGv r3
, uint32_t n
, uint32_t mode
)
723 TCGv temp
= tcg_const_i32(n
);
724 TCGv_i64 temp64
= tcg_temp_new_i64();
725 TCGv_i64 temp64_2
= tcg_temp_new_i64();
728 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
731 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
734 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
737 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
740 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
741 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
742 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
745 tcg_temp_free_i64(temp64
);
746 tcg_temp_free_i64(temp64_2
);
750 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
753 TCGv temp
= tcg_const_i32(n
);
754 TCGv_i64 temp64
= tcg_temp_new_i64();
757 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
760 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
763 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
766 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
769 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
772 tcg_temp_free_i64(temp64
);
776 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
778 TCGv temp
= tcg_temp_new();
779 TCGv temp2
= tcg_temp_new();
781 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
782 tcg_gen_shli_tl(temp
, r1
, 16);
783 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
786 tcg_temp_free(temp2
);
790 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
791 uint32_t n
, uint32_t mode
)
793 TCGv temp
= tcg_const_i32(n
);
794 TCGv_i64 temp64
= tcg_temp_new_i64();
797 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
800 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
803 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
806 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
809 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
812 tcg_temp_free_i64(temp64
);
816 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
818 TCGv temp
= tcg_temp_new();
819 TCGv temp2
= tcg_temp_new();
821 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
822 tcg_gen_shli_tl(temp
, r1
, 16);
823 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
826 tcg_temp_free(temp2
);
830 /* ret = r2 - (r1 * r3); */
831 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
833 TCGv_i64 t1
= tcg_temp_new_i64();
834 TCGv_i64 t2
= tcg_temp_new_i64();
835 TCGv_i64 t3
= tcg_temp_new_i64();
837 tcg_gen_ext_i32_i64(t1
, r1
);
838 tcg_gen_ext_i32_i64(t2
, r2
);
839 tcg_gen_ext_i32_i64(t3
, r3
);
841 tcg_gen_mul_i64(t1
, t1
, t3
);
842 tcg_gen_sub_i64(t1
, t2
, t1
);
844 tcg_gen_trunc_i64_i32(ret
, t1
);
847 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
848 /* result < -0x80000000 */
849 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
850 tcg_gen_or_i64(t2
, t2
, t3
);
851 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
852 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
855 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
856 /* Calc AV/SAV bits */
857 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
858 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
860 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
862 tcg_temp_free_i64(t1
);
863 tcg_temp_free_i64(t2
);
864 tcg_temp_free_i64(t3
);
867 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
869 TCGv temp
= tcg_const_i32(con
);
870 gen_msub32_d(ret
, r1
, r2
, temp
);
875 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
878 TCGv t1
= tcg_temp_new();
879 TCGv t2
= tcg_temp_new();
880 TCGv t3
= tcg_temp_new();
881 TCGv t4
= tcg_temp_new();
883 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
884 /* only the sub can overflow */
885 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
887 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
888 tcg_gen_xor_tl(t1
, r2_high
, t2
);
889 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
891 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
892 /* Calc AV/SAV bits */
893 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
894 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
896 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
897 /* write back the result */
898 tcg_gen_mov_tl(ret_low
, t3
);
899 tcg_gen_mov_tl(ret_high
, t4
);
908 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
911 TCGv temp
= tcg_const_i32(con
);
912 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
917 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
920 TCGv_i64 t1
= tcg_temp_new_i64();
921 TCGv_i64 t2
= tcg_temp_new_i64();
922 TCGv_i64 t3
= tcg_temp_new_i64();
924 tcg_gen_extu_i32_i64(t1
, r1
);
925 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
926 tcg_gen_extu_i32_i64(t3
, r3
);
928 tcg_gen_mul_i64(t1
, t1
, t3
);
929 tcg_gen_sub_i64(t3
, t2
, t1
);
930 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
931 /* calc V bit, only the sub can overflow, if t1 > t2 */
932 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
933 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
934 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
936 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
937 /* Calc AV/SAV bits */
938 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
939 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
941 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
943 tcg_temp_free_i64(t1
);
944 tcg_temp_free_i64(t2
);
945 tcg_temp_free_i64(t3
);
949 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
952 TCGv temp
= tcg_const_i32(con
);
953 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
957 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
959 TCGv temp
= tcg_const_i32(r2
);
960 gen_add_d(ret
, r1
, temp
);
963 /* calculate the carry bit too */
964 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
966 TCGv t0
= tcg_temp_new_i32();
967 TCGv result
= tcg_temp_new_i32();
969 tcg_gen_movi_tl(t0
, 0);
970 /* Addition and set C/V/SV bits */
971 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
973 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
974 tcg_gen_xor_tl(t0
, r1
, r2
);
975 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
977 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
978 /* Calc AV/SAV bits */
979 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
980 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
982 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
983 /* write back result */
984 tcg_gen_mov_tl(ret
, result
);
986 tcg_temp_free(result
);
990 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
992 TCGv temp
= tcg_const_i32(con
);
993 gen_add_CC(ret
, r1
, temp
);
997 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
999 TCGv carry
= tcg_temp_new_i32();
1000 TCGv t0
= tcg_temp_new_i32();
1001 TCGv result
= tcg_temp_new_i32();
1003 tcg_gen_movi_tl(t0
, 0);
1004 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1005 /* Addition, carry and set C/V/SV bits */
1006 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1007 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1009 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1010 tcg_gen_xor_tl(t0
, r1
, r2
);
1011 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1013 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1014 /* Calc AV/SAV bits */
1015 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1016 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1018 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1019 /* write back result */
1020 tcg_gen_mov_tl(ret
, result
);
1022 tcg_temp_free(result
);
1024 tcg_temp_free(carry
);
1027 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1029 TCGv temp
= tcg_const_i32(con
);
1030 gen_addc_CC(ret
, r1
, temp
);
1031 tcg_temp_free(temp
);
1034 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1037 TCGv temp
= tcg_temp_new();
1038 TCGv temp2
= tcg_temp_new();
1039 TCGv result
= tcg_temp_new();
1040 TCGv mask
= tcg_temp_new();
1041 TCGv t0
= tcg_const_i32(0);
1043 /* create mask for sticky bits */
1044 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1045 tcg_gen_shli_tl(mask
, mask
, 31);
1047 tcg_gen_add_tl(result
, r1
, r2
);
1049 tcg_gen_xor_tl(temp
, result
, r1
);
1050 tcg_gen_xor_tl(temp2
, r1
, r2
);
1051 tcg_gen_andc_tl(temp
, temp
, temp2
);
1052 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1054 tcg_gen_and_tl(temp
, temp
, mask
);
1055 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1057 tcg_gen_add_tl(temp
, result
, result
);
1058 tcg_gen_xor_tl(temp
, temp
, result
);
1059 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1061 tcg_gen_and_tl(temp
, temp
, mask
);
1062 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1063 /* write back result */
1064 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1067 tcg_temp_free(temp
);
1068 tcg_temp_free(temp2
);
1069 tcg_temp_free(result
);
1070 tcg_temp_free(mask
);
1073 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1076 TCGv temp
= tcg_const_i32(r2
);
1077 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1078 tcg_temp_free(temp
);
1081 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1083 TCGv temp
= tcg_temp_new_i32();
1084 TCGv result
= tcg_temp_new_i32();
1086 tcg_gen_sub_tl(result
, r1
, r2
);
1088 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1089 tcg_gen_xor_tl(temp
, r1
, r2
);
1090 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1092 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1094 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1095 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1097 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1098 /* write back result */
1099 tcg_gen_mov_tl(ret
, result
);
1101 tcg_temp_free(temp
);
1102 tcg_temp_free(result
);
1105 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1107 TCGv result
= tcg_temp_new();
1108 TCGv temp
= tcg_temp_new();
1110 tcg_gen_sub_tl(result
, r1
, r2
);
1112 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1114 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1115 tcg_gen_xor_tl(temp
, r1
, r2
);
1116 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1118 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1120 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1121 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1123 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1124 /* write back result */
1125 tcg_gen_mov_tl(ret
, result
);
1127 tcg_temp_free(result
);
1128 tcg_temp_free(temp
);
1131 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1133 TCGv temp
= tcg_temp_new();
1134 tcg_gen_not_tl(temp
, r2
);
1135 gen_addc_CC(ret
, r1
, temp
);
1136 tcg_temp_free(temp
);
1139 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1142 TCGv temp
= tcg_temp_new();
1143 TCGv temp2
= tcg_temp_new();
1144 TCGv result
= tcg_temp_new();
1145 TCGv mask
= tcg_temp_new();
1146 TCGv t0
= tcg_const_i32(0);
1148 /* create mask for sticky bits */
1149 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1150 tcg_gen_shli_tl(mask
, mask
, 31);
1152 tcg_gen_sub_tl(result
, r1
, r2
);
1154 tcg_gen_xor_tl(temp
, result
, r1
);
1155 tcg_gen_xor_tl(temp2
, r1
, r2
);
1156 tcg_gen_and_tl(temp
, temp
, temp2
);
1157 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1159 tcg_gen_and_tl(temp
, temp
, mask
);
1160 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1162 tcg_gen_add_tl(temp
, result
, result
);
1163 tcg_gen_xor_tl(temp
, temp
, result
);
1164 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1166 tcg_gen_and_tl(temp
, temp
, mask
);
1167 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1168 /* write back result */
1169 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1172 tcg_temp_free(temp
);
1173 tcg_temp_free(temp2
);
1174 tcg_temp_free(result
);
1175 tcg_temp_free(mask
);
1178 static inline void gen_abs(TCGv ret
, TCGv r1
)
1180 TCGv temp
= tcg_temp_new();
1181 TCGv t0
= tcg_const_i32(0);
1183 tcg_gen_neg_tl(temp
, r1
);
1184 tcg_gen_movcond_tl(TCG_COND_GE
, ret
, r1
, t0
, r1
, temp
);
1185 /* overflow can only happen, if r1 = 0x80000000 */
1186 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
1187 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1189 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1191 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1192 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1194 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1196 tcg_temp_free(temp
);
1200 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
1202 TCGv temp
= tcg_temp_new_i32();
1203 TCGv result
= tcg_temp_new_i32();
1205 tcg_gen_sub_tl(result
, r1
, r2
);
1206 tcg_gen_sub_tl(temp
, r2
, r1
);
1207 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
1210 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1211 tcg_gen_xor_tl(temp
, result
, r2
);
1212 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
1213 tcg_gen_xor_tl(temp
, r1
, r2
);
1214 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1216 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1218 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1219 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1221 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1222 /* write back result */
1223 tcg_gen_mov_tl(ret
, result
);
1225 tcg_temp_free(temp
);
1226 tcg_temp_free(result
);
1229 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
1231 TCGv temp
= tcg_const_i32(con
);
1232 gen_absdif(ret
, r1
, temp
);
1233 tcg_temp_free(temp
);
1236 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
1238 TCGv temp
= tcg_const_i32(con
);
1239 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
1240 tcg_temp_free(temp
);
1243 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
1245 TCGv high
= tcg_temp_new();
1246 TCGv low
= tcg_temp_new();
1248 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
1249 tcg_gen_mov_tl(ret
, low
);
1251 tcg_gen_sari_tl(low
, low
, 31);
1252 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
1253 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1255 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1257 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1258 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1260 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1262 tcg_temp_free(high
);
1266 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
1268 TCGv temp
= tcg_const_i32(con
);
1269 gen_mul_i32s(ret
, r1
, temp
);
1270 tcg_temp_free(temp
);
1273 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
1275 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
1277 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1279 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1281 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1282 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1284 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1287 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
1290 TCGv temp
= tcg_const_i32(con
);
1291 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
1292 tcg_temp_free(temp
);
1295 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
1297 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
1299 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1301 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1303 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1304 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1306 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1309 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
1312 TCGv temp
= tcg_const_i32(con
);
1313 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
1314 tcg_temp_free(temp
);
1317 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
1319 TCGv temp
= tcg_const_i32(con
);
1320 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
1321 tcg_temp_free(temp
);
1324 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
1326 TCGv temp
= tcg_const_i32(con
);
1327 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
1328 tcg_temp_free(temp
);
1330 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
1331 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1333 TCGv temp
= tcg_const_i32(con
);
1334 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
1335 tcg_temp_free(temp
);
1338 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1340 TCGv temp
= tcg_const_i32(con
);
1341 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
1342 tcg_temp_free(temp
);
1346 gen_mul_q(TCGv rl
, TCGv rh
, TCGv arg1
, TCGv arg2
, uint32_t n
, uint32_t up_shift
)
1348 TCGv temp
= tcg_temp_new();
1349 TCGv_i64 temp_64
= tcg_temp_new_i64();
1350 TCGv_i64 temp2_64
= tcg_temp_new_i64();
1353 if (up_shift
== 32) {
1354 tcg_gen_muls2_tl(rh
, rl
, arg1
, arg2
);
1355 } else if (up_shift
== 16) {
1356 tcg_gen_ext_i32_i64(temp_64
, arg1
);
1357 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
1359 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
1360 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
);
1361 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
1363 tcg_gen_muls2_tl(rl
, rh
, arg1
, arg2
);
1366 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1367 } else { /* n is exspected to be 1 */
1368 tcg_gen_ext_i32_i64(temp_64
, arg1
);
1369 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
1371 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
1373 if (up_shift
== 0) {
1374 tcg_gen_shli_i64(temp_64
, temp_64
, 1);
1376 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
- 1);
1378 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
1379 /* overflow only occours if r1 = r2 = 0x8000 */
1380 if (up_shift
== 0) {/* result is 64 bit */
1381 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rh
,
1383 } else { /* result is 32 bit */
1384 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rl
,
1387 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1388 /* calc sv overflow bit */
1389 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1391 /* calc av overflow bit */
1392 if (up_shift
== 0) {
1393 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1394 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1396 tcg_gen_add_tl(cpu_PSW_AV
, rl
, rl
);
1397 tcg_gen_xor_tl(cpu_PSW_AV
, rl
, cpu_PSW_AV
);
1399 /* calc sav overflow bit */
1400 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1401 tcg_temp_free(temp
);
1402 tcg_temp_free_i64(temp_64
);
1403 tcg_temp_free_i64(temp2_64
);
1407 gen_mul_q_16(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
1409 TCGv temp
= tcg_temp_new();
1411 tcg_gen_mul_tl(ret
, arg1
, arg2
);
1412 } else { /* n is exspected to be 1 */
1413 tcg_gen_mul_tl(ret
, arg1
, arg2
);
1414 tcg_gen_shli_tl(ret
, ret
, 1);
1415 /* catch special case r1 = r2 = 0x8000 */
1416 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80000000);
1417 tcg_gen_sub_tl(ret
, ret
, temp
);
1420 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1421 /* calc av overflow bit */
1422 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1423 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1424 /* calc sav overflow bit */
1425 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1427 tcg_temp_free(temp
);
1430 static void gen_mulr_q(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
1432 TCGv temp
= tcg_temp_new();
1434 tcg_gen_mul_tl(ret
, arg1
, arg2
);
1435 tcg_gen_addi_tl(ret
, ret
, 0x8000);
1437 tcg_gen_mul_tl(ret
, arg1
, arg2
);
1438 tcg_gen_shli_tl(ret
, ret
, 1);
1439 tcg_gen_addi_tl(ret
, ret
, 0x8000);
1440 /* catch special case r1 = r2 = 0x8000 */
1441 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80008000);
1442 tcg_gen_muli_tl(temp
, temp
, 0x8001);
1443 tcg_gen_sub_tl(ret
, ret
, temp
);
1446 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1447 /* calc av overflow bit */
1448 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1449 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1450 /* calc sav overflow bit */
1451 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1452 /* cut halfword off */
1453 tcg_gen_andi_tl(ret
, ret
, 0xffff0000);
1455 tcg_temp_free(temp
);
1459 gen_madds_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1462 TCGv_i64 temp64
= tcg_temp_new_i64();
1463 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
1464 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
1465 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1466 tcg_temp_free_i64(temp64
);
1470 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1473 TCGv temp
= tcg_const_i32(con
);
1474 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1475 tcg_temp_free(temp
);
1479 gen_maddsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1482 TCGv_i64 temp64
= tcg_temp_new_i64();
1483 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
1484 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
1485 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1486 tcg_temp_free_i64(temp64
);
1490 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1493 TCGv temp
= tcg_const_i32(con
);
1494 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1495 tcg_temp_free(temp
);
1498 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1500 TCGv temp
= tcg_const_i32(con
);
1501 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
1502 tcg_temp_free(temp
);
1505 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1507 TCGv temp
= tcg_const_i32(con
);
1508 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
1509 tcg_temp_free(temp
);
1513 gen_msubs_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1516 TCGv_i64 temp64
= tcg_temp_new_i64();
1517 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
1518 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
1519 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1520 tcg_temp_free_i64(temp64
);
1524 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1527 TCGv temp
= tcg_const_i32(con
);
1528 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1529 tcg_temp_free(temp
);
1533 gen_msubsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1536 TCGv_i64 temp64
= tcg_temp_new_i64();
1537 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
1538 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
1539 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1540 tcg_temp_free_i64(temp64
);
1544 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1547 TCGv temp
= tcg_const_i32(con
);
1548 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1549 tcg_temp_free(temp
);
1552 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
1554 TCGv sat_neg
= tcg_const_i32(low
);
1555 TCGv temp
= tcg_const_i32(up
);
1557 /* sat_neg = (arg < low ) ? low : arg; */
1558 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
1560 /* ret = (sat_neg > up ) ? up : sat_neg; */
1561 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
1563 tcg_temp_free(sat_neg
);
1564 tcg_temp_free(temp
);
1567 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
1569 TCGv temp
= tcg_const_i32(up
);
1570 /* sat_neg = (arg > up ) ? up : arg; */
1571 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
1572 tcg_temp_free(temp
);
1575 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
1577 if (shift_count
== -32) {
1578 tcg_gen_movi_tl(ret
, 0);
1579 } else if (shift_count
>= 0) {
1580 tcg_gen_shli_tl(ret
, r1
, shift_count
);
1582 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
1586 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
1588 TCGv temp_low
, temp_high
;
1590 if (shiftcount
== -16) {
1591 tcg_gen_movi_tl(ret
, 0);
1593 temp_high
= tcg_temp_new();
1594 temp_low
= tcg_temp_new();
1596 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
1597 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
1598 gen_shi(temp_low
, temp_low
, shiftcount
);
1599 gen_shi(ret
, temp_high
, shiftcount
);
1600 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
1602 tcg_temp_free(temp_low
);
1603 tcg_temp_free(temp_high
);
1607 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
1609 uint32_t msk
, msk_start
;
1610 TCGv temp
= tcg_temp_new();
1611 TCGv temp2
= tcg_temp_new();
1612 TCGv t_0
= tcg_const_i32(0);
1614 if (shift_count
== 0) {
1615 /* Clear PSW.C and PSW.V */
1616 tcg_gen_movi_tl(cpu_PSW_C
, 0);
1617 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
1618 tcg_gen_mov_tl(ret
, r1
);
1619 } else if (shift_count
== -32) {
1621 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
1622 /* fill ret completly with sign bit */
1623 tcg_gen_sari_tl(ret
, r1
, 31);
1625 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1626 } else if (shift_count
> 0) {
1627 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
1628 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
1631 msk_start
= 32 - shift_count
;
1632 msk
= ((1 << shift_count
) - 1) << msk_start
;
1633 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
1634 /* calc v/sv bits */
1635 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
1636 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
1637 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
1638 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1640 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
1642 tcg_gen_shli_tl(ret
, r1
, shift_count
);
1644 tcg_temp_free(t_max
);
1645 tcg_temp_free(t_min
);
1648 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1650 msk
= (1 << -shift_count
) - 1;
1651 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
1653 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
1655 /* calc av overflow bit */
1656 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1657 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1658 /* calc sav overflow bit */
1659 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1661 tcg_temp_free(temp
);
1662 tcg_temp_free(temp2
);
1666 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
1668 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
1671 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
1673 TCGv temp
= tcg_const_i32(con
);
1674 gen_shas(ret
, r1
, temp
);
1675 tcg_temp_free(temp
);
1678 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
1682 if (shift_count
== 0) {
1683 tcg_gen_mov_tl(ret
, r1
);
1684 } else if (shift_count
> 0) {
1685 low
= tcg_temp_new();
1686 high
= tcg_temp_new();
1688 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
1689 tcg_gen_shli_tl(low
, r1
, shift_count
);
1690 tcg_gen_shli_tl(ret
, high
, shift_count
);
1691 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
1694 tcg_temp_free(high
);
1696 low
= tcg_temp_new();
1697 high
= tcg_temp_new();
1699 tcg_gen_ext16s_tl(low
, r1
);
1700 tcg_gen_sari_tl(low
, low
, -shift_count
);
1701 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
1702 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
1705 tcg_temp_free(high
);
1710 /* ret = {ret[30:0], (r1 cond r2)}; */
1711 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
1713 TCGv temp
= tcg_temp_new();
1714 TCGv temp2
= tcg_temp_new();
1716 tcg_gen_shli_tl(temp
, ret
, 1);
1717 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
1718 tcg_gen_or_tl(ret
, temp
, temp2
);
1720 tcg_temp_free(temp
);
1721 tcg_temp_free(temp2
);
1724 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
1726 TCGv temp
= tcg_const_i32(con
);
1727 gen_sh_cond(cond
, ret
, r1
, temp
);
1728 tcg_temp_free(temp
);
1731 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
1733 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
1736 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
1738 TCGv temp
= tcg_const_i32(con
);
1739 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
1740 tcg_temp_free(temp
);
1743 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
1745 TCGv temp
= tcg_const_i32(con
);
1746 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
1747 tcg_temp_free(temp
);
1750 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
1752 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
1755 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
1757 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
1760 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
1762 void(*op1
)(TCGv
, TCGv
, TCGv
),
1763 void(*op2
)(TCGv
, TCGv
, TCGv
))
1767 temp1
= tcg_temp_new();
1768 temp2
= tcg_temp_new();
1770 tcg_gen_shri_tl(temp2
, r2
, pos2
);
1771 tcg_gen_shri_tl(temp1
, r1
, pos1
);
1773 (*op1
)(temp1
, temp1
, temp2
);
1774 (*op2
)(temp1
, ret
, temp1
);
1776 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
1778 tcg_temp_free(temp1
);
1779 tcg_temp_free(temp2
);
1782 /* ret = r1[pos1] op1 r2[pos2]; */
1783 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
1785 void(*op1
)(TCGv
, TCGv
, TCGv
))
1789 temp1
= tcg_temp_new();
1790 temp2
= tcg_temp_new();
1792 tcg_gen_shri_tl(temp2
, r2
, pos2
);
1793 tcg_gen_shri_tl(temp1
, r1
, pos1
);
1795 (*op1
)(ret
, temp1
, temp2
);
1797 tcg_gen_andi_tl(ret
, ret
, 0x1);
1799 tcg_temp_free(temp1
);
1800 tcg_temp_free(temp2
);
1803 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
1804 void(*op
)(TCGv
, TCGv
, TCGv
))
1806 TCGv temp
= tcg_temp_new();
1807 TCGv temp2
= tcg_temp_new();
1808 /* temp = (arg1 cond arg2 )*/
1809 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
1811 tcg_gen_andi_tl(temp2
, ret
, 0x1);
1812 /* temp = temp insn temp2 */
1813 (*op
)(temp
, temp
, temp2
);
1814 /* ret = {ret[31:1], temp} */
1815 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
1817 tcg_temp_free(temp
);
1818 tcg_temp_free(temp2
);
1822 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
1823 void(*op
)(TCGv
, TCGv
, TCGv
))
1825 TCGv temp
= tcg_const_i32(con
);
1826 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
1827 tcg_temp_free(temp
);
1830 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
1831 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
1833 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
1834 tcg_gen_neg_tl(ret
, ret
);
1837 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
1839 TCGv b0
= tcg_temp_new();
1840 TCGv b1
= tcg_temp_new();
1841 TCGv b2
= tcg_temp_new();
1842 TCGv b3
= tcg_temp_new();
1845 tcg_gen_andi_tl(b0
, r1
, 0xff);
1846 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
1849 tcg_gen_andi_tl(b1
, r1
, 0xff00);
1850 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
1853 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
1854 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
1857 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
1858 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
1861 tcg_gen_or_tl(ret
, b0
, b1
);
1862 tcg_gen_or_tl(ret
, ret
, b2
);
1863 tcg_gen_or_tl(ret
, ret
, b3
);
1871 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
1873 TCGv h0
= tcg_temp_new();
1874 TCGv h1
= tcg_temp_new();
1877 tcg_gen_andi_tl(h0
, r1
, 0xffff);
1878 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
1881 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
1882 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
1885 tcg_gen_or_tl(ret
, h0
, h1
);
1890 /* mask = ((1 << width) -1) << pos;
1891 ret = (r1 & ~mask) | (r2 << pos) & mask); */
1892 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
1894 TCGv mask
= tcg_temp_new();
1895 TCGv temp
= tcg_temp_new();
1896 TCGv temp2
= tcg_temp_new();
1898 tcg_gen_movi_tl(mask
, 1);
1899 tcg_gen_shl_tl(mask
, mask
, width
);
1900 tcg_gen_subi_tl(mask
, mask
, 1);
1901 tcg_gen_shl_tl(mask
, mask
, pos
);
1903 tcg_gen_shl_tl(temp
, r2
, pos
);
1904 tcg_gen_and_tl(temp
, temp
, mask
);
1905 tcg_gen_andc_tl(temp2
, r1
, mask
);
1906 tcg_gen_or_tl(ret
, temp
, temp2
);
1908 tcg_temp_free(mask
);
1909 tcg_temp_free(temp
);
1910 tcg_temp_free(temp2
);
1913 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
1915 TCGv_i64 temp
= tcg_temp_new_i64();
1917 gen_helper_bsplit(temp
, r1
);
1918 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
1920 tcg_temp_free_i64(temp
);
1923 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
1925 TCGv_i64 temp
= tcg_temp_new_i64();
1927 gen_helper_unpack(temp
, r1
);
1928 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
1930 tcg_temp_free_i64(temp
);
1934 gen_dvinit_b(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
1936 TCGv_i64 ret
= tcg_temp_new_i64();
1938 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
1939 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
1941 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
1943 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
1945 tcg_temp_free_i64(ret
);
1949 gen_dvinit_h(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
1951 TCGv_i64 ret
= tcg_temp_new_i64();
1953 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
1954 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
1956 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
1958 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
1960 tcg_temp_free_i64(ret
);
1963 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
1965 TCGv temp
= tcg_temp_new();
1967 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
1968 tcg_gen_xor_tl(temp
, temp
, arg_low
);
1969 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
1970 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
1971 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
1973 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1974 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1975 tcg_temp_free(temp
);
1978 static void gen_calc_usb_mulr_h(TCGv arg
)
1980 TCGv temp
= tcg_temp_new();
1982 tcg_gen_add_tl(temp
, arg
, arg
);
1983 tcg_gen_xor_tl(temp
, temp
, arg
);
1984 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
1985 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
1987 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1989 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1990 tcg_temp_free(temp
);
1993 /* helpers for generating program flow micro-ops */
1995 static inline void gen_save_pc(target_ulong pc
)
1997 tcg_gen_movi_tl(cpu_PC
, pc
);
2000 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
2002 TranslationBlock
*tb
;
2004 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
2005 likely(!ctx
->singlestep_enabled
)) {
2008 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
2011 if (ctx
->singlestep_enabled
) {
2012 /* raise exception debug */
2018 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2019 TCGv r2
, int16_t address
)
2022 jumpLabel
= gen_new_label();
2023 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
2025 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
2027 gen_set_label(jumpLabel
);
2028 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
2031 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2032 int r2
, int16_t address
)
2034 TCGv temp
= tcg_const_i32(r2
);
2035 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
2036 tcg_temp_free(temp
);
2039 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
2042 l1
= gen_new_label();
2044 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
2045 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
2046 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
2048 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
2051 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
2052 int r2
, int32_t constant
, int32_t offset
)
2058 /* SB-format jumps */
2061 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2063 case OPC1_32_B_CALL
:
2064 case OPC1_16_SB_CALL
:
2065 gen_helper_1arg(call
, ctx
->next_pc
);
2066 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2069 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
2071 case OPC1_16_SB_JNZ
:
2072 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
2074 /* SBC-format jumps */
2075 case OPC1_16_SBC_JEQ
:
2076 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
2078 case OPC1_16_SBC_JNE
:
2079 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
2081 /* SBRN-format jumps */
2082 case OPC1_16_SBRN_JZ_T
:
2083 temp
= tcg_temp_new();
2084 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2085 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2086 tcg_temp_free(temp
);
2088 case OPC1_16_SBRN_JNZ_T
:
2089 temp
= tcg_temp_new();
2090 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2091 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2092 tcg_temp_free(temp
);
2094 /* SBR-format jumps */
2095 case OPC1_16_SBR_JEQ
:
2096 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2099 case OPC1_16_SBR_JNE
:
2100 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2103 case OPC1_16_SBR_JNZ
:
2104 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
2106 case OPC1_16_SBR_JNZ_A
:
2107 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2109 case OPC1_16_SBR_JGEZ
:
2110 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
2112 case OPC1_16_SBR_JGTZ
:
2113 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
2115 case OPC1_16_SBR_JLEZ
:
2116 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
2118 case OPC1_16_SBR_JLTZ
:
2119 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
2121 case OPC1_16_SBR_JZ
:
2122 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
2124 case OPC1_16_SBR_JZ_A
:
2125 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2127 case OPC1_16_SBR_LOOP
:
2128 gen_loop(ctx
, r1
, offset
* 2 - 32);
2130 /* SR-format jumps */
2132 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
2135 case OPC2_16_SR_RET
:
2136 gen_helper_ret(cpu_env
);
2140 case OPC1_32_B_CALLA
:
2141 gen_helper_1arg(call
, ctx
->next_pc
);
2142 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
2145 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
2148 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
2151 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
2152 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2155 case OPCM_32_BRC_EQ_NEQ
:
2156 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
2157 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
2159 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
2162 case OPCM_32_BRC_GE
:
2163 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
2164 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
2166 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
2167 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
2171 case OPCM_32_BRC_JLT
:
2172 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
2173 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
2175 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
2176 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
2180 case OPCM_32_BRC_JNE
:
2181 temp
= tcg_temp_new();
2182 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
2183 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2184 /* subi is unconditional */
2185 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2186 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
2188 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2189 /* addi is unconditional */
2190 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2191 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
2193 tcg_temp_free(temp
);
2196 case OPCM_32_BRN_JTT
:
2197 n
= MASK_OP_BRN_N(ctx
->opcode
);
2199 temp
= tcg_temp_new();
2200 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
2202 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
2203 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2205 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2207 tcg_temp_free(temp
);
2210 case OPCM_32_BRR_EQ_NEQ
:
2211 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
2212 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2215 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2219 case OPCM_32_BRR_ADDR_EQ_NEQ
:
2220 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
2221 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
2224 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
2228 case OPCM_32_BRR_GE
:
2229 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
2230 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2233 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2237 case OPCM_32_BRR_JLT
:
2238 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
2239 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2242 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2246 case OPCM_32_BRR_LOOP
:
2247 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
2248 gen_loop(ctx
, r1
, offset
* 2);
2250 /* OPC2_32_BRR_LOOPU */
2251 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2254 case OPCM_32_BRR_JNE
:
2255 temp
= tcg_temp_new();
2256 temp2
= tcg_temp_new();
2257 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
2258 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2259 /* also save r2, in case of r1 == r2, so r2 is not decremented */
2260 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
2261 /* subi is unconditional */
2262 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2263 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
2265 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2266 /* also save r2, in case of r1 == r2, so r2 is not decremented */
2267 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
2268 /* addi is unconditional */
2269 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2270 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
2272 tcg_temp_free(temp
);
2273 tcg_temp_free(temp2
);
2275 case OPCM_32_BRR_JNZ
:
2276 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
2277 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2279 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2283 printf("Branch Error at %x\n", ctx
->pc
);
2285 ctx
->bstate
= BS_BRANCH
;
2290 * Functions for decoding instructions
2293 static void decode_src_opc(DisasContext
*ctx
, int op1
)
2299 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
2300 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
2303 case OPC1_16_SRC_ADD
:
2304 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
2306 case OPC1_16_SRC_ADD_A15
:
2307 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
2309 case OPC1_16_SRC_ADD_15A
:
2310 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
2312 case OPC1_16_SRC_ADD_A
:
2313 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
2315 case OPC1_16_SRC_CADD
:
2316 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
2319 case OPC1_16_SRC_CADDN
:
2320 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
2323 case OPC1_16_SRC_CMOV
:
2324 temp
= tcg_const_tl(0);
2325 temp2
= tcg_const_tl(const4
);
2326 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
2327 temp2
, cpu_gpr_d
[r1
]);
2328 tcg_temp_free(temp
);
2329 tcg_temp_free(temp2
);
2331 case OPC1_16_SRC_CMOVN
:
2332 temp
= tcg_const_tl(0);
2333 temp2
= tcg_const_tl(const4
);
2334 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
2335 temp2
, cpu_gpr_d
[r1
]);
2336 tcg_temp_free(temp
);
2337 tcg_temp_free(temp2
);
2339 case OPC1_16_SRC_EQ
:
2340 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
2343 case OPC1_16_SRC_LT
:
2344 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
2347 case OPC1_16_SRC_MOV
:
2348 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
2350 case OPC1_16_SRC_MOV_A
:
2351 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
2352 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
2354 case OPC1_16_SRC_SH
:
2355 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
2357 case OPC1_16_SRC_SHA
:
2358 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
2363 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
2368 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
2369 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
2372 case OPC1_16_SRR_ADD
:
2373 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2375 case OPC1_16_SRR_ADD_A15
:
2376 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
2378 case OPC1_16_SRR_ADD_15A
:
2379 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2381 case OPC1_16_SRR_ADD_A
:
2382 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
2384 case OPC1_16_SRR_ADDS
:
2385 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2387 case OPC1_16_SRR_AND
:
2388 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2390 case OPC1_16_SRR_CMOV
:
2391 temp
= tcg_const_tl(0);
2392 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
2393 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
2394 tcg_temp_free(temp
);
2396 case OPC1_16_SRR_CMOVN
:
2397 temp
= tcg_const_tl(0);
2398 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
2399 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
2400 tcg_temp_free(temp
);
2402 case OPC1_16_SRR_EQ
:
2403 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
2406 case OPC1_16_SRR_LT
:
2407 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
2410 case OPC1_16_SRR_MOV
:
2411 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2413 case OPC1_16_SRR_MOV_A
:
2414 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
2416 case OPC1_16_SRR_MOV_AA
:
2417 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
2419 case OPC1_16_SRR_MOV_D
:
2420 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
2422 case OPC1_16_SRR_MUL
:
2423 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2425 case OPC1_16_SRR_OR
:
2426 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2428 case OPC1_16_SRR_SUB
:
2429 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2431 case OPC1_16_SRR_SUB_A15B
:
2432 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
2434 case OPC1_16_SRR_SUB_15AB
:
2435 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2437 case OPC1_16_SRR_SUBS
:
2438 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2440 case OPC1_16_SRR_XOR
:
2441 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2446 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
2450 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
2451 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
2454 case OPC1_16_SSR_ST_A
:
2455 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
2457 case OPC1_16_SSR_ST_A_POSTINC
:
2458 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
2459 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
2461 case OPC1_16_SSR_ST_B
:
2462 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
2464 case OPC1_16_SSR_ST_B_POSTINC
:
2465 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
2466 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
2468 case OPC1_16_SSR_ST_H
:
2469 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
2471 case OPC1_16_SSR_ST_H_POSTINC
:
2472 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
2473 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
2475 case OPC1_16_SSR_ST_W
:
2476 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
2478 case OPC1_16_SSR_ST_W_POSTINC
:
2479 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
2480 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
2485 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
2489 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
2492 case OPC1_16_SC_AND
:
2493 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
2495 case OPC1_16_SC_BISR
:
2496 gen_helper_1arg(bisr
, const16
& 0xff);
2498 case OPC1_16_SC_LD_A
:
2499 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2501 case OPC1_16_SC_LD_W
:
2502 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2504 case OPC1_16_SC_MOV
:
2505 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
2508 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
2510 case OPC1_16_SC_ST_A
:
2511 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2513 case OPC1_16_SC_ST_W
:
2514 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2516 case OPC1_16_SC_SUB_A
:
2517 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
2522 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
2526 r1
= MASK_OP_SLR_D(ctx
->opcode
);
2527 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
2531 case OPC1_16_SLR_LD_A
:
2532 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
2534 case OPC1_16_SLR_LD_A_POSTINC
:
2535 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
2536 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
2538 case OPC1_16_SLR_LD_BU
:
2539 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
2541 case OPC1_16_SLR_LD_BU_POSTINC
:
2542 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
2543 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
2545 case OPC1_16_SLR_LD_H
:
2546 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2548 case OPC1_16_SLR_LD_H_POSTINC
:
2549 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2550 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
2552 case OPC1_16_SLR_LD_W
:
2553 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2555 case OPC1_16_SLR_LD_W_POSTINC
:
2556 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2557 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
2562 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
2567 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
2568 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
2572 case OPC1_16_SRO_LD_A
:
2573 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
2575 case OPC1_16_SRO_LD_BU
:
2576 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
2578 case OPC1_16_SRO_LD_H
:
2579 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
2581 case OPC1_16_SRO_LD_W
:
2582 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
2584 case OPC1_16_SRO_ST_A
:
2585 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
2587 case OPC1_16_SRO_ST_B
:
2588 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
2590 case OPC1_16_SRO_ST_H
:
2591 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
2593 case OPC1_16_SRO_ST_W
:
2594 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
2599 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
2602 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
2605 case OPC2_16_SR_NOP
:
2607 case OPC2_16_SR_RET
:
2608 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
2610 case OPC2_16_SR_RFE
:
2611 gen_helper_rfe(cpu_env
);
2613 ctx
->bstate
= BS_BRANCH
;
2615 case OPC2_16_SR_DEBUG
:
2616 /* raise EXCP_DEBUG */
2621 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
2627 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
2628 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
2631 case OPC2_16_SR_RSUB
:
2632 /* overflow only if r1 = -0x80000000 */
2633 temp
= tcg_const_i32(-0x80000000);
2635 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
2636 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2638 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2640 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
2642 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
2643 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
2645 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2646 tcg_temp_free(temp
);
2648 case OPC2_16_SR_SAT_B
:
2649 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
2651 case OPC2_16_SR_SAT_BU
:
2652 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
2654 case OPC2_16_SR_SAT_H
:
2655 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
2657 case OPC2_16_SR_SAT_HU
:
2658 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
2663 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
2671 op1
= MASK_OP_MAJOR(ctx
->opcode
);
2673 /* handle ADDSC.A opcode only being 6 bit long */
2674 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
2675 op1
= OPC1_16_SRRS_ADDSC_A
;
2679 case OPC1_16_SRC_ADD
:
2680 case OPC1_16_SRC_ADD_A15
:
2681 case OPC1_16_SRC_ADD_15A
:
2682 case OPC1_16_SRC_ADD_A
:
2683 case OPC1_16_SRC_CADD
:
2684 case OPC1_16_SRC_CADDN
:
2685 case OPC1_16_SRC_CMOV
:
2686 case OPC1_16_SRC_CMOVN
:
2687 case OPC1_16_SRC_EQ
:
2688 case OPC1_16_SRC_LT
:
2689 case OPC1_16_SRC_MOV
:
2690 case OPC1_16_SRC_MOV_A
:
2691 case OPC1_16_SRC_SH
:
2692 case OPC1_16_SRC_SHA
:
2693 decode_src_opc(ctx
, op1
);
2696 case OPC1_16_SRR_ADD
:
2697 case OPC1_16_SRR_ADD_A15
:
2698 case OPC1_16_SRR_ADD_15A
:
2699 case OPC1_16_SRR_ADD_A
:
2700 case OPC1_16_SRR_ADDS
:
2701 case OPC1_16_SRR_AND
:
2702 case OPC1_16_SRR_CMOV
:
2703 case OPC1_16_SRR_CMOVN
:
2704 case OPC1_16_SRR_EQ
:
2705 case OPC1_16_SRR_LT
:
2706 case OPC1_16_SRR_MOV
:
2707 case OPC1_16_SRR_MOV_A
:
2708 case OPC1_16_SRR_MOV_AA
:
2709 case OPC1_16_SRR_MOV_D
:
2710 case OPC1_16_SRR_MUL
:
2711 case OPC1_16_SRR_OR
:
2712 case OPC1_16_SRR_SUB
:
2713 case OPC1_16_SRR_SUB_A15B
:
2714 case OPC1_16_SRR_SUB_15AB
:
2715 case OPC1_16_SRR_SUBS
:
2716 case OPC1_16_SRR_XOR
:
2717 decode_srr_opc(ctx
, op1
);
2720 case OPC1_16_SSR_ST_A
:
2721 case OPC1_16_SSR_ST_A_POSTINC
:
2722 case OPC1_16_SSR_ST_B
:
2723 case OPC1_16_SSR_ST_B_POSTINC
:
2724 case OPC1_16_SSR_ST_H
:
2725 case OPC1_16_SSR_ST_H_POSTINC
:
2726 case OPC1_16_SSR_ST_W
:
2727 case OPC1_16_SSR_ST_W_POSTINC
:
2728 decode_ssr_opc(ctx
, op1
);
2731 case OPC1_16_SRRS_ADDSC_A
:
2732 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
2733 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
2734 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
2735 temp
= tcg_temp_new();
2736 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
2737 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
2738 tcg_temp_free(temp
);
2741 case OPC1_16_SLRO_LD_A
:
2742 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2743 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2744 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2746 case OPC1_16_SLRO_LD_BU
:
2747 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2748 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2749 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
2751 case OPC1_16_SLRO_LD_H
:
2752 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2753 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2754 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
2756 case OPC1_16_SLRO_LD_W
:
2757 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
2758 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
2759 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2762 case OPC1_16_SB_CALL
:
2764 case OPC1_16_SB_JNZ
:
2766 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
2767 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
2770 case OPC1_16_SBC_JEQ
:
2771 case OPC1_16_SBC_JNE
:
2772 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
2773 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
2774 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
2777 case OPC1_16_SBRN_JNZ_T
:
2778 case OPC1_16_SBRN_JZ_T
:
2779 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
2780 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
2781 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
2784 case OPC1_16_SBR_JEQ
:
2785 case OPC1_16_SBR_JGEZ
:
2786 case OPC1_16_SBR_JGTZ
:
2787 case OPC1_16_SBR_JLEZ
:
2788 case OPC1_16_SBR_JLTZ
:
2789 case OPC1_16_SBR_JNE
:
2790 case OPC1_16_SBR_JNZ
:
2791 case OPC1_16_SBR_JNZ_A
:
2792 case OPC1_16_SBR_JZ
:
2793 case OPC1_16_SBR_JZ_A
:
2794 case OPC1_16_SBR_LOOP
:
2795 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
2796 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
2797 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
2800 case OPC1_16_SC_AND
:
2801 case OPC1_16_SC_BISR
:
2802 case OPC1_16_SC_LD_A
:
2803 case OPC1_16_SC_LD_W
:
2804 case OPC1_16_SC_MOV
:
2806 case OPC1_16_SC_ST_A
:
2807 case OPC1_16_SC_ST_W
:
2808 case OPC1_16_SC_SUB_A
:
2809 decode_sc_opc(ctx
, op1
);
2812 case OPC1_16_SLR_LD_A
:
2813 case OPC1_16_SLR_LD_A_POSTINC
:
2814 case OPC1_16_SLR_LD_BU
:
2815 case OPC1_16_SLR_LD_BU_POSTINC
:
2816 case OPC1_16_SLR_LD_H
:
2817 case OPC1_16_SLR_LD_H_POSTINC
:
2818 case OPC1_16_SLR_LD_W
:
2819 case OPC1_16_SLR_LD_W_POSTINC
:
2820 decode_slr_opc(ctx
, op1
);
2823 case OPC1_16_SRO_LD_A
:
2824 case OPC1_16_SRO_LD_BU
:
2825 case OPC1_16_SRO_LD_H
:
2826 case OPC1_16_SRO_LD_W
:
2827 case OPC1_16_SRO_ST_A
:
2828 case OPC1_16_SRO_ST_B
:
2829 case OPC1_16_SRO_ST_H
:
2830 case OPC1_16_SRO_ST_W
:
2831 decode_sro_opc(ctx
, op1
);
2834 case OPC1_16_SSRO_ST_A
:
2835 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2836 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2837 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2839 case OPC1_16_SSRO_ST_B
:
2840 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2841 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2842 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
2844 case OPC1_16_SSRO_ST_H
:
2845 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2846 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2847 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
2849 case OPC1_16_SSRO_ST_W
:
2850 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
2851 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
2852 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
2855 case OPCM_16_SR_SYSTEM
:
2856 decode_sr_system(env
, ctx
);
2858 case OPCM_16_SR_ACCU
:
2859 decode_sr_accu(env
, ctx
);
2862 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
2863 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
2865 case OPC1_16_SR_NOT
:
2866 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
2867 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
2873 * 32 bit instructions
2877 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
2884 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2885 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2886 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2888 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2891 case OPC2_32_ABS_LD_A
:
2892 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
2894 case OPC2_32_ABS_LD_D
:
2895 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
2897 case OPC2_32_ABS_LD_DA
:
2898 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
2900 case OPC2_32_ABS_LD_W
:
2901 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
2905 tcg_temp_free(temp
);
2908 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
2915 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2916 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2917 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2919 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2922 case OPC2_32_ABS_LD_B
:
2923 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
2925 case OPC2_32_ABS_LD_BU
:
2926 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
2928 case OPC2_32_ABS_LD_H
:
2929 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
2931 case OPC2_32_ABS_LD_HU
:
2932 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
2936 tcg_temp_free(temp
);
2939 static void decode_abs_ldst_swap(CPUTriCoreState
*env
, DisasContext
*ctx
)
2946 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2947 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2948 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2950 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
2953 case OPC2_32_ABS_LDMST
:
2954 gen_ldmst(ctx
, r1
, temp
);
2956 case OPC2_32_ABS_SWAP_W
:
2957 gen_swap(ctx
, r1
, temp
);
2961 tcg_temp_free(temp
);
2964 static void decode_abs_ldst_context(CPUTriCoreState
*env
, DisasContext
*ctx
)
2969 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2970 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2973 case OPC2_32_ABS_LDLCX
:
2974 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
2976 case OPC2_32_ABS_LDUCX
:
2977 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
2979 case OPC2_32_ABS_STLCX
:
2980 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
2982 case OPC2_32_ABS_STUCX
:
2983 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
2988 static void decode_abs_store(CPUTriCoreState
*env
, DisasContext
*ctx
)
2995 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
2996 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
2997 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
2999 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3002 case OPC2_32_ABS_ST_A
:
3003 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3005 case OPC2_32_ABS_ST_D
:
3006 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3008 case OPC2_32_ABS_ST_DA
:
3009 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3011 case OPC2_32_ABS_ST_W
:
3012 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3016 tcg_temp_free(temp
);
3019 static void decode_abs_storeb_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
3026 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3027 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3028 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3030 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3033 case OPC2_32_ABS_ST_B
:
3034 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3036 case OPC2_32_ABS_ST_H
:
3037 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3040 tcg_temp_free(temp
);
3045 static void decode_bit_andacc(CPUTriCoreState
*env
, DisasContext
*ctx
)
3051 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3052 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3053 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3054 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3055 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3056 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3060 case OPC2_32_BIT_AND_AND_T
:
3061 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3062 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
3064 case OPC2_32_BIT_AND_ANDN_T
:
3065 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3066 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
3068 case OPC2_32_BIT_AND_NOR_T
:
3069 if (TCG_TARGET_HAS_andc_i32
) {
3070 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3071 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
3073 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3074 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
3077 case OPC2_32_BIT_AND_OR_T
:
3078 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3079 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
3084 static void decode_bit_logical_t(CPUTriCoreState
*env
, DisasContext
*ctx
)
3089 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3090 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3091 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3092 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3093 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3094 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3097 case OPC2_32_BIT_AND_T
:
3098 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3099 pos1
, pos2
, &tcg_gen_and_tl
);
3101 case OPC2_32_BIT_ANDN_T
:
3102 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3103 pos1
, pos2
, &tcg_gen_andc_tl
);
3105 case OPC2_32_BIT_NOR_T
:
3106 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3107 pos1
, pos2
, &tcg_gen_nor_tl
);
3109 case OPC2_32_BIT_OR_T
:
3110 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3111 pos1
, pos2
, &tcg_gen_or_tl
);
3116 static void decode_bit_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
3122 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3123 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3124 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3125 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3126 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3127 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3129 temp
= tcg_temp_new();
3131 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
3132 if (op2
== OPC2_32_BIT_INSN_T
) {
3133 tcg_gen_not_tl(temp
, temp
);
3135 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
3136 tcg_temp_free(temp
);
3139 static void decode_bit_logical_t2(CPUTriCoreState
*env
, DisasContext
*ctx
)
3146 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3147 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3148 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3149 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3150 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3151 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3154 case OPC2_32_BIT_NAND_T
:
3155 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3156 pos1
, pos2
, &tcg_gen_nand_tl
);
3158 case OPC2_32_BIT_ORN_T
:
3159 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3160 pos1
, pos2
, &tcg_gen_orc_tl
);
3162 case OPC2_32_BIT_XNOR_T
:
3163 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3164 pos1
, pos2
, &tcg_gen_eqv_tl
);
3166 case OPC2_32_BIT_XOR_T
:
3167 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3168 pos1
, pos2
, &tcg_gen_xor_tl
);
3173 static void decode_bit_orand(CPUTriCoreState
*env
, DisasContext
*ctx
)
3180 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3181 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3182 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3183 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3184 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3185 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3188 case OPC2_32_BIT_OR_AND_T
:
3189 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3190 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
3192 case OPC2_32_BIT_OR_ANDN_T
:
3193 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3194 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
3196 case OPC2_32_BIT_OR_NOR_T
:
3197 if (TCG_TARGET_HAS_orc_i32
) {
3198 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3199 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
3201 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3202 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
3205 case OPC2_32_BIT_OR_OR_T
:
3206 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3207 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
3212 static void decode_bit_sh_logic1(CPUTriCoreState
*env
, DisasContext
*ctx
)
3219 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3220 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3221 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3222 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3223 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3224 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3226 temp
= tcg_temp_new();
3229 case OPC2_32_BIT_SH_AND_T
:
3230 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3231 pos1
, pos2
, &tcg_gen_and_tl
);
3233 case OPC2_32_BIT_SH_ANDN_T
:
3234 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3235 pos1
, pos2
, &tcg_gen_andc_tl
);
3237 case OPC2_32_BIT_SH_NOR_T
:
3238 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3239 pos1
, pos2
, &tcg_gen_nor_tl
);
3241 case OPC2_32_BIT_SH_OR_T
:
3242 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3243 pos1
, pos2
, &tcg_gen_or_tl
);
3246 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
3247 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
3248 tcg_temp_free(temp
);
3251 static void decode_bit_sh_logic2(CPUTriCoreState
*env
, DisasContext
*ctx
)
3258 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3259 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3260 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3261 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3262 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3263 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3265 temp
= tcg_temp_new();
3268 case OPC2_32_BIT_SH_NAND_T
:
3269 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
3270 pos1
, pos2
, &tcg_gen_nand_tl
);
3272 case OPC2_32_BIT_SH_ORN_T
:
3273 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3274 pos1
, pos2
, &tcg_gen_orc_tl
);
3276 case OPC2_32_BIT_SH_XNOR_T
:
3277 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3278 pos1
, pos2
, &tcg_gen_eqv_tl
);
3280 case OPC2_32_BIT_SH_XOR_T
:
3281 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3282 pos1
, pos2
, &tcg_gen_xor_tl
);
3285 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
3286 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
3287 tcg_temp_free(temp
);
3293 static void decode_bo_addrmode_post_pre_base(CPUTriCoreState
*env
,
3301 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3302 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3303 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3304 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3307 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
3308 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
3309 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
3310 /* instruction to access the cache */
3312 case OPC2_32_BO_CACHEA_WI_POSTINC
:
3313 case OPC2_32_BO_CACHEA_W_POSTINC
:
3314 case OPC2_32_BO_CACHEA_I_POSTINC
:
3315 /* instruction to access the cache, but we still need to handle
3316 the addressing mode */
3317 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
3319 case OPC2_32_BO_CACHEA_WI_PREINC
:
3320 case OPC2_32_BO_CACHEA_W_PREINC
:
3321 case OPC2_32_BO_CACHEA_I_PREINC
:
3322 /* instruction to access the cache, but we still need to handle
3323 the addressing mode */
3324 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
3326 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
3327 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
3328 /* TODO: Raise illegal opcode trap,
3329 if !tricore_feature(TRICORE_FEATURE_131) */
3331 case OPC2_32_BO_CACHEI_W_POSTINC
:
3332 case OPC2_32_BO_CACHEI_WI_POSTINC
:
3333 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
3334 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
3335 } /* TODO: else raise illegal opcode trap */
3337 case OPC2_32_BO_CACHEI_W_PREINC
:
3338 case OPC2_32_BO_CACHEI_WI_PREINC
:
3339 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
3340 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
3341 } /* TODO: else raise illegal opcode trap */
3343 case OPC2_32_BO_ST_A_SHORTOFF
:
3344 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
3346 case OPC2_32_BO_ST_A_POSTINC
:
3347 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3349 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3351 case OPC2_32_BO_ST_A_PREINC
:
3352 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
3354 case OPC2_32_BO_ST_B_SHORTOFF
:
3355 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
3357 case OPC2_32_BO_ST_B_POSTINC
:
3358 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3360 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3362 case OPC2_32_BO_ST_B_PREINC
:
3363 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
3365 case OPC2_32_BO_ST_D_SHORTOFF
:
3366 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
3369 case OPC2_32_BO_ST_D_POSTINC
:
3370 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
3371 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3373 case OPC2_32_BO_ST_D_PREINC
:
3374 temp
= tcg_temp_new();
3375 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3376 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3377 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
3378 tcg_temp_free(temp
);
3380 case OPC2_32_BO_ST_DA_SHORTOFF
:
3381 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3384 case OPC2_32_BO_ST_DA_POSTINC
:
3385 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
3386 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3388 case OPC2_32_BO_ST_DA_PREINC
:
3389 temp
= tcg_temp_new();
3390 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3391 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3392 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
3393 tcg_temp_free(temp
);
3395 case OPC2_32_BO_ST_H_SHORTOFF
:
3396 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3398 case OPC2_32_BO_ST_H_POSTINC
:
3399 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3401 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3403 case OPC2_32_BO_ST_H_PREINC
:
3404 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3406 case OPC2_32_BO_ST_Q_SHORTOFF
:
3407 temp
= tcg_temp_new();
3408 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3409 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3410 tcg_temp_free(temp
);
3412 case OPC2_32_BO_ST_Q_POSTINC
:
3413 temp
= tcg_temp_new();
3414 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3415 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
3417 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3418 tcg_temp_free(temp
);
3420 case OPC2_32_BO_ST_Q_PREINC
:
3421 temp
= tcg_temp_new();
3422 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3423 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3424 tcg_temp_free(temp
);
3426 case OPC2_32_BO_ST_W_SHORTOFF
:
3427 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3429 case OPC2_32_BO_ST_W_POSTINC
:
3430 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3432 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3434 case OPC2_32_BO_ST_W_PREINC
:
3435 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3440 static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState
*env
,
3446 TCGv temp
, temp2
, temp3
;
3448 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3449 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3450 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3451 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3453 temp
= tcg_temp_new();
3454 temp2
= tcg_temp_new();
3455 temp3
= tcg_const_i32(off10
);
3457 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
3458 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3461 case OPC2_32_BO_CACHEA_WI_BR
:
3462 case OPC2_32_BO_CACHEA_W_BR
:
3463 case OPC2_32_BO_CACHEA_I_BR
:
3464 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3466 case OPC2_32_BO_CACHEA_WI_CIRC
:
3467 case OPC2_32_BO_CACHEA_W_CIRC
:
3468 case OPC2_32_BO_CACHEA_I_CIRC
:
3469 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3471 case OPC2_32_BO_ST_A_BR
:
3472 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3473 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3475 case OPC2_32_BO_ST_A_CIRC
:
3476 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3477 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3479 case OPC2_32_BO_ST_B_BR
:
3480 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3481 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3483 case OPC2_32_BO_ST_B_CIRC
:
3484 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3485 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3487 case OPC2_32_BO_ST_D_BR
:
3488 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
3489 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3491 case OPC2_32_BO_ST_D_CIRC
:
3492 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3493 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3494 tcg_gen_addi_tl(temp
, temp
, 4);
3495 tcg_gen_rem_tl(temp
, temp
, temp2
);
3496 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3497 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3498 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3500 case OPC2_32_BO_ST_DA_BR
:
3501 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
3502 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3504 case OPC2_32_BO_ST_DA_CIRC
:
3505 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3506 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3507 tcg_gen_addi_tl(temp
, temp
, 4);
3508 tcg_gen_rem_tl(temp
, temp
, temp2
);
3509 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3510 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3511 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3513 case OPC2_32_BO_ST_H_BR
:
3514 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3515 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3517 case OPC2_32_BO_ST_H_CIRC
:
3518 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3519 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3521 case OPC2_32_BO_ST_Q_BR
:
3522 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3523 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
3524 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3526 case OPC2_32_BO_ST_Q_CIRC
:
3527 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3528 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
3529 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3531 case OPC2_32_BO_ST_W_BR
:
3532 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3533 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3535 case OPC2_32_BO_ST_W_CIRC
:
3536 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3537 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3540 tcg_temp_free(temp
);
3541 tcg_temp_free(temp2
);
3542 tcg_temp_free(temp3
);
3545 static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState
*env
,
3553 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3554 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3555 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3556 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3559 case OPC2_32_BO_LD_A_SHORTOFF
:
3560 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3562 case OPC2_32_BO_LD_A_POSTINC
:
3563 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3565 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3567 case OPC2_32_BO_LD_A_PREINC
:
3568 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3570 case OPC2_32_BO_LD_B_SHORTOFF
:
3571 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
3573 case OPC2_32_BO_LD_B_POSTINC
:
3574 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3576 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3578 case OPC2_32_BO_LD_B_PREINC
:
3579 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
3581 case OPC2_32_BO_LD_BU_SHORTOFF
:
3582 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
3584 case OPC2_32_BO_LD_BU_POSTINC
:
3585 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3587 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3589 case OPC2_32_BO_LD_BU_PREINC
:
3590 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
3592 case OPC2_32_BO_LD_D_SHORTOFF
:
3593 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
3596 case OPC2_32_BO_LD_D_POSTINC
:
3597 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
3598 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3600 case OPC2_32_BO_LD_D_PREINC
:
3601 temp
= tcg_temp_new();
3602 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3603 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3604 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
3605 tcg_temp_free(temp
);
3607 case OPC2_32_BO_LD_DA_SHORTOFF
:
3608 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3611 case OPC2_32_BO_LD_DA_POSTINC
:
3612 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
3613 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3615 case OPC2_32_BO_LD_DA_PREINC
:
3616 temp
= tcg_temp_new();
3617 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3618 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3619 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
3620 tcg_temp_free(temp
);
3622 case OPC2_32_BO_LD_H_SHORTOFF
:
3623 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
3625 case OPC2_32_BO_LD_H_POSTINC
:
3626 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3628 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3630 case OPC2_32_BO_LD_H_PREINC
:
3631 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
3633 case OPC2_32_BO_LD_HU_SHORTOFF
:
3634 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3636 case OPC2_32_BO_LD_HU_POSTINC
:
3637 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3639 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3641 case OPC2_32_BO_LD_HU_PREINC
:
3642 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3644 case OPC2_32_BO_LD_Q_SHORTOFF
:
3645 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3646 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3648 case OPC2_32_BO_LD_Q_POSTINC
:
3649 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3651 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3652 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3654 case OPC2_32_BO_LD_Q_PREINC
:
3655 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3656 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3658 case OPC2_32_BO_LD_W_SHORTOFF
:
3659 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3661 case OPC2_32_BO_LD_W_POSTINC
:
3662 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3664 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3666 case OPC2_32_BO_LD_W_PREINC
:
3667 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3672 static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState
*env
,
3679 TCGv temp
, temp2
, temp3
;
3681 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3682 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3683 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3684 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3686 temp
= tcg_temp_new();
3687 temp2
= tcg_temp_new();
3688 temp3
= tcg_const_i32(off10
);
3690 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
3691 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3695 case OPC2_32_BO_LD_A_BR
:
3696 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3697 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3699 case OPC2_32_BO_LD_A_CIRC
:
3700 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3701 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3703 case OPC2_32_BO_LD_B_BR
:
3704 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
3705 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3707 case OPC2_32_BO_LD_B_CIRC
:
3708 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
3709 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3711 case OPC2_32_BO_LD_BU_BR
:
3712 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3713 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3715 case OPC2_32_BO_LD_BU_CIRC
:
3716 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3717 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3719 case OPC2_32_BO_LD_D_BR
:
3720 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
3721 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3723 case OPC2_32_BO_LD_D_CIRC
:
3724 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3725 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3726 tcg_gen_addi_tl(temp
, temp
, 4);
3727 tcg_gen_rem_tl(temp
, temp
, temp2
);
3728 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3729 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3730 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3732 case OPC2_32_BO_LD_DA_BR
:
3733 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
3734 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3736 case OPC2_32_BO_LD_DA_CIRC
:
3737 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3738 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3739 tcg_gen_addi_tl(temp
, temp
, 4);
3740 tcg_gen_rem_tl(temp
, temp
, temp2
);
3741 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3742 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3743 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3745 case OPC2_32_BO_LD_H_BR
:
3746 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
3747 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3749 case OPC2_32_BO_LD_H_CIRC
:
3750 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
3751 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3753 case OPC2_32_BO_LD_HU_BR
:
3754 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3755 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3757 case OPC2_32_BO_LD_HU_CIRC
:
3758 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3759 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3761 case OPC2_32_BO_LD_Q_BR
:
3762 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3763 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3764 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3766 case OPC2_32_BO_LD_Q_CIRC
:
3767 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3768 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3769 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3771 case OPC2_32_BO_LD_W_BR
:
3772 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3773 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3775 case OPC2_32_BO_LD_W_CIRC
:
3776 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3777 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3780 tcg_temp_free(temp
);
3781 tcg_temp_free(temp2
);
3782 tcg_temp_free(temp3
);
3785 static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState
*env
,
3794 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3795 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3796 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3797 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3800 temp
= tcg_temp_new();
3801 temp2
= tcg_temp_new();
3804 case OPC2_32_BO_LDLCX_SHORTOFF
:
3805 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3806 gen_helper_ldlcx(cpu_env
, temp
);
3808 case OPC2_32_BO_LDMST_SHORTOFF
:
3809 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3810 gen_ldmst(ctx
, r1
, temp
);
3812 case OPC2_32_BO_LDMST_POSTINC
:
3813 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
3814 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3816 case OPC2_32_BO_LDMST_PREINC
:
3817 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3818 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
3820 case OPC2_32_BO_LDUCX_SHORTOFF
:
3821 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3822 gen_helper_lducx(cpu_env
, temp
);
3824 case OPC2_32_BO_LEA_SHORTOFF
:
3825 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
3827 case OPC2_32_BO_STLCX_SHORTOFF
:
3828 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3829 gen_helper_stlcx(cpu_env
, temp
);
3831 case OPC2_32_BO_STUCX_SHORTOFF
:
3832 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3833 gen_helper_stucx(cpu_env
, temp
);
3835 case OPC2_32_BO_SWAP_W_SHORTOFF
:
3836 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3837 gen_swap(ctx
, r1
, temp
);
3839 case OPC2_32_BO_SWAP_W_POSTINC
:
3840 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
3841 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3843 case OPC2_32_BO_SWAP_W_PREINC
:
3844 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3845 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
3848 tcg_temp_free(temp
);
3849 tcg_temp_free(temp2
);
3852 static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState
*env
,
3859 TCGv temp
, temp2
, temp3
;
3861 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3862 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3863 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3864 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3866 temp
= tcg_temp_new();
3867 temp2
= tcg_temp_new();
3868 temp3
= tcg_const_i32(off10
);
3870 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
3871 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3874 case OPC2_32_BO_LDMST_BR
:
3875 gen_ldmst(ctx
, r1
, temp2
);
3876 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3878 case OPC2_32_BO_LDMST_CIRC
:
3879 gen_ldmst(ctx
, r1
, temp2
);
3880 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3882 case OPC2_32_BO_SWAP_W_BR
:
3883 gen_swap(ctx
, r1
, temp2
);
3884 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3886 case OPC2_32_BO_SWAP_W_CIRC
:
3887 gen_swap(ctx
, r1
, temp2
);
3888 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3891 tcg_temp_free(temp
);
3892 tcg_temp_free(temp2
);
3893 tcg_temp_free(temp3
);
3896 static void decode_bol_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int32_t op1
)
3902 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
3903 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
3904 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
3907 case OPC1_32_BOL_LD_A_LONGOFF
:
3908 temp
= tcg_temp_new();
3909 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
3910 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
3911 tcg_temp_free(temp
);
3913 case OPC1_32_BOL_LD_W_LONGOFF
:
3914 temp
= tcg_temp_new();
3915 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
3916 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
3917 tcg_temp_free(temp
);
3919 case OPC1_32_BOL_LEA_LONGOFF
:
3920 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
3922 case OPC1_32_BOL_ST_A_LONGOFF
:
3923 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3924 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
3926 /* raise illegal opcode trap */
3929 case OPC1_32_BOL_ST_W_LONGOFF
:
3930 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
3932 case OPC1_32_BOL_LD_B_LONGOFF
:
3933 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3934 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
3936 /* raise illegal opcode trap */
3939 case OPC1_32_BOL_LD_BU_LONGOFF
:
3940 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3941 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
3943 /* raise illegal opcode trap */
3946 case OPC1_32_BOL_LD_H_LONGOFF
:
3947 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3948 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
3950 /* raise illegal opcode trap */
3953 case OPC1_32_BOL_LD_HU_LONGOFF
:
3954 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3955 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
3957 /* raise illegal opcode trap */
3960 case OPC1_32_BOL_ST_B_LONGOFF
:
3961 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3962 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
3964 /* raise illegal opcode trap */
3967 case OPC1_32_BOL_ST_H_LONGOFF
:
3968 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3969 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
3971 /* raise illegal opcode trap */
3978 static void decode_rc_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
3985 r2
= MASK_OP_RC_D(ctx
->opcode
);
3986 r1
= MASK_OP_RC_S1(ctx
->opcode
);
3987 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
3988 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
3990 temp
= tcg_temp_new();
3993 case OPC2_32_RC_AND
:
3994 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
3996 case OPC2_32_RC_ANDN
:
3997 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
3999 case OPC2_32_RC_NAND
:
4000 tcg_gen_movi_tl(temp
, const9
);
4001 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4003 case OPC2_32_RC_NOR
:
4004 tcg_gen_movi_tl(temp
, const9
);
4005 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4008 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4010 case OPC2_32_RC_ORN
:
4011 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4014 const9
= sextract32(const9
, 0, 6);
4015 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4017 case OPC2_32_RC_SH_H
:
4018 const9
= sextract32(const9
, 0, 5);
4019 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4021 case OPC2_32_RC_SHA
:
4022 const9
= sextract32(const9
, 0, 6);
4023 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4025 case OPC2_32_RC_SHA_H
:
4026 const9
= sextract32(const9
, 0, 5);
4027 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4029 case OPC2_32_RC_SHAS
:
4030 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4032 case OPC2_32_RC_XNOR
:
4033 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4034 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
4036 case OPC2_32_RC_XOR
:
4037 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4040 tcg_temp_free(temp
);
4043 static void decode_rc_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
4051 r2
= MASK_OP_RC_D(ctx
->opcode
);
4052 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4053 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
4055 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4057 temp
= tcg_temp_new();
4060 case OPC2_32_RC_ABSDIF
:
4061 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4063 case OPC2_32_RC_ABSDIFS
:
4064 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4066 case OPC2_32_RC_ADD
:
4067 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4069 case OPC2_32_RC_ADDC
:
4070 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4072 case OPC2_32_RC_ADDS
:
4073 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4075 case OPC2_32_RC_ADDS_U
:
4076 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4078 case OPC2_32_RC_ADDX
:
4079 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4081 case OPC2_32_RC_AND_EQ
:
4082 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4083 const9
, &tcg_gen_and_tl
);
4085 case OPC2_32_RC_AND_GE
:
4086 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4087 const9
, &tcg_gen_and_tl
);
4089 case OPC2_32_RC_AND_GE_U
:
4090 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4091 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4092 const9
, &tcg_gen_and_tl
);
4094 case OPC2_32_RC_AND_LT
:
4095 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4096 const9
, &tcg_gen_and_tl
);
4098 case OPC2_32_RC_AND_LT_U
:
4099 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4100 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4101 const9
, &tcg_gen_and_tl
);
4103 case OPC2_32_RC_AND_NE
:
4104 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4105 const9
, &tcg_gen_and_tl
);
4108 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4110 case OPC2_32_RC_EQANY_B
:
4111 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4113 case OPC2_32_RC_EQANY_H
:
4114 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4117 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4119 case OPC2_32_RC_GE_U
:
4120 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4121 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4124 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4126 case OPC2_32_RC_LT_U
:
4127 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4128 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4130 case OPC2_32_RC_MAX
:
4131 tcg_gen_movi_tl(temp
, const9
);
4132 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4133 cpu_gpr_d
[r1
], temp
);
4135 case OPC2_32_RC_MAX_U
:
4136 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
4137 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4138 cpu_gpr_d
[r1
], temp
);
4140 case OPC2_32_RC_MIN
:
4141 tcg_gen_movi_tl(temp
, const9
);
4142 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4143 cpu_gpr_d
[r1
], temp
);
4145 case OPC2_32_RC_MIN_U
:
4146 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
4147 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4148 cpu_gpr_d
[r1
], temp
);
4151 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4153 case OPC2_32_RC_OR_EQ
:
4154 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4155 const9
, &tcg_gen_or_tl
);
4157 case OPC2_32_RC_OR_GE
:
4158 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4159 const9
, &tcg_gen_or_tl
);
4161 case OPC2_32_RC_OR_GE_U
:
4162 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4163 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4164 const9
, &tcg_gen_or_tl
);
4166 case OPC2_32_RC_OR_LT
:
4167 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4168 const9
, &tcg_gen_or_tl
);
4170 case OPC2_32_RC_OR_LT_U
:
4171 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4172 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4173 const9
, &tcg_gen_or_tl
);
4175 case OPC2_32_RC_OR_NE
:
4176 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4177 const9
, &tcg_gen_or_tl
);
4179 case OPC2_32_RC_RSUB
:
4180 tcg_gen_movi_tl(temp
, const9
);
4181 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
4183 case OPC2_32_RC_RSUBS
:
4184 tcg_gen_movi_tl(temp
, const9
);
4185 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
4187 case OPC2_32_RC_RSUBS_U
:
4188 tcg_gen_movi_tl(temp
, const9
);
4189 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
4191 case OPC2_32_RC_SH_EQ
:
4192 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4194 case OPC2_32_RC_SH_GE
:
4195 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4197 case OPC2_32_RC_SH_GE_U
:
4198 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4199 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4201 case OPC2_32_RC_SH_LT
:
4202 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4204 case OPC2_32_RC_SH_LT_U
:
4205 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4206 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4208 case OPC2_32_RC_SH_NE
:
4209 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4211 case OPC2_32_RC_XOR_EQ
:
4212 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4213 const9
, &tcg_gen_xor_tl
);
4215 case OPC2_32_RC_XOR_GE
:
4216 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4217 const9
, &tcg_gen_xor_tl
);
4219 case OPC2_32_RC_XOR_GE_U
:
4220 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4221 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4222 const9
, &tcg_gen_xor_tl
);
4224 case OPC2_32_RC_XOR_LT
:
4225 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4226 const9
, &tcg_gen_xor_tl
);
4228 case OPC2_32_RC_XOR_LT_U
:
4229 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4230 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4231 const9
, &tcg_gen_xor_tl
);
4233 case OPC2_32_RC_XOR_NE
:
4234 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4235 const9
, &tcg_gen_xor_tl
);
4238 tcg_temp_free(temp
);
4241 static void decode_rc_serviceroutine(CPUTriCoreState
*env
, DisasContext
*ctx
)
4246 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4247 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4250 case OPC2_32_RC_BISR
:
4251 gen_helper_1arg(bisr
, const9
);
4253 case OPC2_32_RC_SYSCALL
:
4254 /* TODO: Add exception generation */
4259 static void decode_rc_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
4265 r2
= MASK_OP_RC_D(ctx
->opcode
);
4266 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4267 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
4269 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4272 case OPC2_32_RC_MUL_32
:
4273 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4275 case OPC2_32_RC_MUL_64
:
4276 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
4278 case OPC2_32_RC_MULS_32
:
4279 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4281 case OPC2_32_RC_MUL_U_64
:
4282 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4283 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
4285 case OPC2_32_RC_MULS_U_32
:
4286 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4287 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4293 static void decode_rcpw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
4297 int32_t pos
, width
, const4
;
4301 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
4302 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
4303 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
4304 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
4305 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
4306 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
4309 case OPC2_32_RCPW_IMASK
:
4310 /* if pos + width > 31 undefined result */
4311 if (pos
+ width
<= 31) {
4312 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
4313 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
4316 case OPC2_32_RCPW_INSERT
:
4317 /* if pos + width > 32 undefined result */
4318 if (pos
+ width
<= 32) {
4319 temp
= tcg_const_i32(const4
);
4320 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
4321 tcg_temp_free(temp
);
4329 static void decode_rcrw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
4333 int32_t width
, const4
;
4335 TCGv temp
, temp2
, temp3
;
4337 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
4338 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
4339 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
4340 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
4341 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
4342 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
4344 temp
= tcg_temp_new();
4345 temp2
= tcg_temp_new();
4348 case OPC2_32_RCRW_IMASK
:
4349 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r4
], 0x1f);
4350 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
4351 tcg_gen_shl_tl(cpu_gpr_d
[r3
+ 1], temp2
, temp
);
4352 tcg_gen_movi_tl(temp2
, const4
);
4353 tcg_gen_shl_tl(cpu_gpr_d
[r3
], temp2
, temp
);
4355 case OPC2_32_RCRW_INSERT
:
4356 temp3
= tcg_temp_new();
4358 tcg_gen_movi_tl(temp
, width
);
4359 tcg_gen_movi_tl(temp2
, const4
);
4360 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r4
], 0x1f);
4361 gen_insert(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
4363 tcg_temp_free(temp3
);
4366 tcg_temp_free(temp
);
4367 tcg_temp_free(temp2
);
4372 static void decode_rcr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
4380 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
4381 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
4382 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
4383 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
4384 r4
= MASK_OP_RCR_D(ctx
->opcode
);
4387 case OPC2_32_RCR_CADD
:
4388 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
4391 case OPC2_32_RCR_CADDN
:
4392 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
4395 case OPC2_32_RCR_SEL
:
4396 temp
= tcg_const_i32(0);
4397 temp2
= tcg_const_i32(const9
);
4398 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
4399 cpu_gpr_d
[r1
], temp2
);
4400 tcg_temp_free(temp
);
4401 tcg_temp_free(temp2
);
4403 case OPC2_32_RCR_SELN
:
4404 temp
= tcg_const_i32(0);
4405 temp2
= tcg_const_i32(const9
);
4406 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
4407 cpu_gpr_d
[r1
], temp2
);
4408 tcg_temp_free(temp
);
4409 tcg_temp_free(temp2
);
4414 static void decode_rcr_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
4421 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
4422 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
4423 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
4424 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
4425 r4
= MASK_OP_RCR_D(ctx
->opcode
);
4428 case OPC2_32_RCR_MADD_32
:
4429 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4431 case OPC2_32_RCR_MADD_64
:
4432 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4433 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4435 case OPC2_32_RCR_MADDS_32
:
4436 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4438 case OPC2_32_RCR_MADDS_64
:
4439 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4440 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4442 case OPC2_32_RCR_MADD_U_64
:
4443 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4444 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4445 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4447 case OPC2_32_RCR_MADDS_U_32
:
4448 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4449 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4451 case OPC2_32_RCR_MADDS_U_64
:
4452 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4453 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4454 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4459 static void decode_rcr_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
4466 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
4467 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
4468 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
4469 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
4470 r4
= MASK_OP_RCR_D(ctx
->opcode
);
4473 case OPC2_32_RCR_MSUB_32
:
4474 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4476 case OPC2_32_RCR_MSUB_64
:
4477 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4478 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4480 case OPC2_32_RCR_MSUBS_32
:
4481 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4483 case OPC2_32_RCR_MSUBS_64
:
4484 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4485 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4487 case OPC2_32_RCR_MSUB_U_64
:
4488 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4489 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4490 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4492 case OPC2_32_RCR_MSUBS_U_32
:
4493 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4494 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4496 case OPC2_32_RCR_MSUBS_U_64
:
4497 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4498 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4499 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4506 static void decode_rlc_opc(CPUTriCoreState
*env
, DisasContext
*ctx
,
4512 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
4513 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
4514 r2
= MASK_OP_RLC_D(ctx
->opcode
);
4517 case OPC1_32_RLC_ADDI
:
4518 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
4520 case OPC1_32_RLC_ADDIH
:
4521 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
4523 case OPC1_32_RLC_ADDIH_A
:
4524 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
4526 case OPC1_32_RLC_MFCR
:
4527 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
4528 gen_mfcr(env
, cpu_gpr_d
[r2
], const16
);
4530 case OPC1_32_RLC_MOV
:
4531 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
4533 case OPC1_32_RLC_MOV_64
:
4534 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4535 if ((r2
& 0x1) != 0) {
4536 /* TODO: raise OPD trap */
4538 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
4539 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
4541 /* TODO: raise illegal opcode trap */
4544 case OPC1_32_RLC_MOV_U
:
4545 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
4546 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
4548 case OPC1_32_RLC_MOV_H
:
4549 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
4551 case OPC1_32_RLC_MOVH_A
:
4552 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
4554 case OPC1_32_RLC_MTCR
:
4555 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
4556 gen_mtcr(env
, ctx
, cpu_gpr_d
[r1
], const16
);
4562 static void decode_rr_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
4567 r3
= MASK_OP_RR_D(ctx
->opcode
);
4568 r2
= MASK_OP_RR_S2(ctx
->opcode
);
4569 r1
= MASK_OP_RR_S1(ctx
->opcode
);
4570 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
4573 case OPC2_32_RR_ABS
:
4574 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
4576 case OPC2_32_RR_ABS_B
:
4577 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4579 case OPC2_32_RR_ABS_H
:
4580 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4582 case OPC2_32_RR_ABSDIF
:
4583 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4585 case OPC2_32_RR_ABSDIF_B
:
4586 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4589 case OPC2_32_RR_ABSDIF_H
:
4590 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4593 case OPC2_32_RR_ABSDIFS
:
4594 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4597 case OPC2_32_RR_ABSDIFS_H
:
4598 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4601 case OPC2_32_RR_ABSS
:
4602 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4604 case OPC2_32_RR_ABSS_H
:
4605 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4607 case OPC2_32_RR_ADD
:
4608 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4610 case OPC2_32_RR_ADD_B
:
4611 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4613 case OPC2_32_RR_ADD_H
:
4614 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4616 case OPC2_32_RR_ADDC
:
4617 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4619 case OPC2_32_RR_ADDS
:
4620 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4622 case OPC2_32_RR_ADDS_H
:
4623 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4626 case OPC2_32_RR_ADDS_HU
:
4627 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4630 case OPC2_32_RR_ADDS_U
:
4631 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4634 case OPC2_32_RR_ADDX
:
4635 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4637 case OPC2_32_RR_AND_EQ
:
4638 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4639 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4641 case OPC2_32_RR_AND_GE
:
4642 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4643 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4645 case OPC2_32_RR_AND_GE_U
:
4646 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4647 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4649 case OPC2_32_RR_AND_LT
:
4650 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4651 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4653 case OPC2_32_RR_AND_LT_U
:
4654 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4655 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4657 case OPC2_32_RR_AND_NE
:
4658 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4659 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4662 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4665 case OPC2_32_RR_EQ_B
:
4666 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4668 case OPC2_32_RR_EQ_H
:
4669 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4671 case OPC2_32_RR_EQ_W
:
4672 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4674 case OPC2_32_RR_EQANY_B
:
4675 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4677 case OPC2_32_RR_EQANY_H
:
4678 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4681 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4684 case OPC2_32_RR_GE_U
:
4685 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4689 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4692 case OPC2_32_RR_LT_U
:
4693 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4696 case OPC2_32_RR_LT_B
:
4697 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4699 case OPC2_32_RR_LT_BU
:
4700 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4702 case OPC2_32_RR_LT_H
:
4703 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4705 case OPC2_32_RR_LT_HU
:
4706 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4708 case OPC2_32_RR_LT_W
:
4709 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4711 case OPC2_32_RR_LT_WU
:
4712 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4714 case OPC2_32_RR_MAX
:
4715 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4716 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4718 case OPC2_32_RR_MAX_U
:
4719 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4720 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4722 case OPC2_32_RR_MAX_B
:
4723 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4725 case OPC2_32_RR_MAX_BU
:
4726 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4728 case OPC2_32_RR_MAX_H
:
4729 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4731 case OPC2_32_RR_MAX_HU
:
4732 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4734 case OPC2_32_RR_MIN
:
4735 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4736 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4738 case OPC2_32_RR_MIN_U
:
4739 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4740 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4742 case OPC2_32_RR_MIN_B
:
4743 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4745 case OPC2_32_RR_MIN_BU
:
4746 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4748 case OPC2_32_RR_MIN_H
:
4749 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4751 case OPC2_32_RR_MIN_HU
:
4752 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4754 case OPC2_32_RR_MOV
:
4755 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
4758 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4761 case OPC2_32_RR_OR_EQ
:
4762 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4763 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4765 case OPC2_32_RR_OR_GE
:
4766 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4767 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4769 case OPC2_32_RR_OR_GE_U
:
4770 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4771 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4773 case OPC2_32_RR_OR_LT
:
4774 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4775 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4777 case OPC2_32_RR_OR_LT_U
:
4778 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4779 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4781 case OPC2_32_RR_OR_NE
:
4782 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4783 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
4785 case OPC2_32_RR_SAT_B
:
4786 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
4788 case OPC2_32_RR_SAT_BU
:
4789 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
4791 case OPC2_32_RR_SAT_H
:
4792 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
4794 case OPC2_32_RR_SAT_HU
:
4795 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
4797 case OPC2_32_RR_SH_EQ
:
4798 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4801 case OPC2_32_RR_SH_GE
:
4802 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4805 case OPC2_32_RR_SH_GE_U
:
4806 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4809 case OPC2_32_RR_SH_LT
:
4810 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4813 case OPC2_32_RR_SH_LT_U
:
4814 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4817 case OPC2_32_RR_SH_NE
:
4818 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4821 case OPC2_32_RR_SUB
:
4822 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4824 case OPC2_32_RR_SUB_B
:
4825 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4827 case OPC2_32_RR_SUB_H
:
4828 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4830 case OPC2_32_RR_SUBC
:
4831 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4833 case OPC2_32_RR_SUBS
:
4834 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4836 case OPC2_32_RR_SUBS_U
:
4837 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4839 case OPC2_32_RR_SUBS_H
:
4840 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4843 case OPC2_32_RR_SUBS_HU
:
4844 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4847 case OPC2_32_RR_SUBX
:
4848 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4850 case OPC2_32_RR_XOR_EQ
:
4851 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4852 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4854 case OPC2_32_RR_XOR_GE
:
4855 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4856 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4858 case OPC2_32_RR_XOR_GE_U
:
4859 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4860 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4862 case OPC2_32_RR_XOR_LT
:
4863 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4864 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4866 case OPC2_32_RR_XOR_LT_U
:
4867 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4868 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4870 case OPC2_32_RR_XOR_NE
:
4871 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4872 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
4877 static void decode_rr_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
4883 r3
= MASK_OP_RR_D(ctx
->opcode
);
4884 r2
= MASK_OP_RR_S2(ctx
->opcode
);
4885 r1
= MASK_OP_RR_S1(ctx
->opcode
);
4887 temp
= tcg_temp_new();
4888 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
4891 case OPC2_32_RR_AND
:
4892 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4894 case OPC2_32_RR_ANDN
:
4895 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4897 case OPC2_32_RR_CLO
:
4898 gen_helper_clo(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4900 case OPC2_32_RR_CLO_H
:
4901 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4903 case OPC2_32_RR_CLS
:
4904 gen_helper_cls(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4906 case OPC2_32_RR_CLS_H
:
4907 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4909 case OPC2_32_RR_CLZ
:
4910 gen_helper_clz(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4912 case OPC2_32_RR_CLZ_H
:
4913 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
4915 case OPC2_32_RR_NAND
:
4916 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4918 case OPC2_32_RR_NOR
:
4919 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4922 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4924 case OPC2_32_RR_ORN
:
4925 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4928 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4930 case OPC2_32_RR_SH_H
:
4931 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4933 case OPC2_32_RR_SHA
:
4934 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4936 case OPC2_32_RR_SHA_H
:
4937 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4939 case OPC2_32_RR_SHAS
:
4940 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4942 case OPC2_32_RR_XNOR
:
4943 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4945 case OPC2_32_RR_XOR
:
4946 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4949 tcg_temp_free(temp
);
4952 static void decode_rr_address(CPUTriCoreState
*env
, DisasContext
*ctx
)
4958 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
4959 r3
= MASK_OP_RR_D(ctx
->opcode
);
4960 r2
= MASK_OP_RR_S2(ctx
->opcode
);
4961 r1
= MASK_OP_RR_S1(ctx
->opcode
);
4962 n
= MASK_OP_RR_N(ctx
->opcode
);
4965 case OPC2_32_RR_ADD_A
:
4966 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
4968 case OPC2_32_RR_ADDSC_A
:
4969 temp
= tcg_temp_new();
4970 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
4971 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
4972 tcg_temp_free(temp
);
4974 case OPC2_32_RR_ADDSC_AT
:
4975 temp
= tcg_temp_new();
4976 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
4977 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
4978 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
4979 tcg_temp_free(temp
);
4981 case OPC2_32_RR_EQ_A
:
4982 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
4985 case OPC2_32_RR_EQZ
:
4986 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
4988 case OPC2_32_RR_GE_A
:
4989 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
4992 case OPC2_32_RR_LT_A
:
4993 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
4996 case OPC2_32_RR_MOV_A
:
4997 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
4999 case OPC2_32_RR_MOV_AA
:
5000 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
5002 case OPC2_32_RR_MOV_D
:
5003 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
5005 case OPC2_32_RR_NE_A
:
5006 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
5009 case OPC2_32_RR_NEZ_A
:
5010 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
5012 case OPC2_32_RR_SUB_A
:
5013 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
5018 static void decode_rr_idirect(CPUTriCoreState
*env
, DisasContext
*ctx
)
5023 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5024 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5028 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
5030 case OPC2_32_RR_JLI
:
5031 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
5032 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
5034 case OPC2_32_RR_CALLI
:
5035 gen_helper_1arg(call
, ctx
->next_pc
);
5036 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
5040 ctx
->bstate
= BS_BRANCH
;
5043 static void decode_rr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
5050 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5051 r3
= MASK_OP_RR_D(ctx
->opcode
);
5052 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5053 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5056 case OPC2_32_RR_BMERGE
:
5057 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5059 case OPC2_32_RR_BSPLIT
:
5060 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
5062 case OPC2_32_RR_DVINIT_B
:
5063 gen_dvinit_b(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
5066 case OPC2_32_RR_DVINIT_BU
:
5067 temp
= tcg_temp_new();
5068 temp2
= tcg_temp_new();
5070 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5071 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
5072 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
5073 tcg_gen_neg_tl(temp
, cpu_gpr_d
[r3
+1]);
5074 /* use cpu_PSW_AV to compare against 0 */
5075 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, cpu_gpr_d
[r3
+1], cpu_PSW_AV
,
5076 temp
, cpu_gpr_d
[r3
+1]);
5077 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
5078 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
5079 temp2
, cpu_gpr_d
[r2
]);
5080 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
5082 /* overflow = (D[b] == 0) */
5083 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
5085 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5087 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5089 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 8);
5090 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
5091 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp
);
5093 tcg_temp_free(temp
);
5094 tcg_temp_free(temp2
);
5096 case OPC2_32_RR_DVINIT_H
:
5097 gen_dvinit_h(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
5100 case OPC2_32_RR_DVINIT_HU
:
5101 temp
= tcg_temp_new();
5102 temp2
= tcg_temp_new();
5104 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5105 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
5106 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
5107 tcg_gen_neg_tl(temp
, cpu_gpr_d
[r3
+1]);
5108 /* use cpu_PSW_AV to compare against 0 */
5109 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, cpu_gpr_d
[r3
+1], cpu_PSW_AV
,
5110 temp
, cpu_gpr_d
[r3
+1]);
5111 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
5112 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
5113 temp2
, cpu_gpr_d
[r2
]);
5114 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
5116 /* overflow = (D[b] == 0) */
5117 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
5119 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5121 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5123 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
5124 tcg_gen_shri_tl(cpu_gpr_d
[r3
+1], temp
, 16);
5125 tcg_gen_shli_tl(cpu_gpr_d
[r3
], temp
, 16);
5126 tcg_temp_free(temp
);
5127 tcg_temp_free(temp2
);
5129 case OPC2_32_RR_DVINIT
:
5130 temp
= tcg_temp_new();
5131 temp2
= tcg_temp_new();
5132 /* overflow = ((D[b] == 0) ||
5133 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
5134 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
5135 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
5136 tcg_gen_and_tl(temp
, temp
, temp2
);
5137 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
5138 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
5139 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5141 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5143 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5145 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5146 /* sign extend to high reg */
5147 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
5148 tcg_temp_free(temp
);
5149 tcg_temp_free(temp2
);
5151 case OPC2_32_RR_DVINIT_U
:
5152 /* overflow = (D[b] == 0) */
5153 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
5154 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5156 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5158 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5160 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5161 /* zero extend to high reg*/
5162 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
5164 case OPC2_32_RR_PARITY
:
5165 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5167 case OPC2_32_RR_UNPACK
:
5168 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
5174 static void decode_rr1_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
5182 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
5183 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
5184 r3
= MASK_OP_RR1_D(ctx
->opcode
);
5185 n
= tcg_const_i32(MASK_OP_RR1_N(ctx
->opcode
));
5186 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
5189 case OPC2_32_RR1_MUL_H_32_LL
:
5190 temp64
= tcg_temp_new_i64();
5191 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5192 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5193 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5194 tcg_temp_free_i64(temp64
);
5196 case OPC2_32_RR1_MUL_H_32_LU
:
5197 temp64
= tcg_temp_new_i64();
5198 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5199 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5200 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5201 tcg_temp_free_i64(temp64
);
5203 case OPC2_32_RR1_MUL_H_32_UL
:
5204 temp64
= tcg_temp_new_i64();
5205 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5206 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5207 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5208 tcg_temp_free_i64(temp64
);
5210 case OPC2_32_RR1_MUL_H_32_UU
:
5211 temp64
= tcg_temp_new_i64();
5212 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5213 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5214 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5215 tcg_temp_free_i64(temp64
);
5217 case OPC2_32_RR1_MULM_H_64_LL
:
5218 temp64
= tcg_temp_new_i64();
5219 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5220 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5222 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5224 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5225 tcg_temp_free_i64(temp64
);
5227 case OPC2_32_RR1_MULM_H_64_LU
:
5228 temp64
= tcg_temp_new_i64();
5229 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5230 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5232 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5234 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5235 tcg_temp_free_i64(temp64
);
5237 case OPC2_32_RR1_MULM_H_64_UL
:
5238 temp64
= tcg_temp_new_i64();
5239 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5240 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5242 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5244 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5245 tcg_temp_free_i64(temp64
);
5247 case OPC2_32_RR1_MULM_H_64_UU
:
5248 temp64
= tcg_temp_new_i64();
5249 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5250 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5252 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5254 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5255 tcg_temp_free_i64(temp64
);
5258 case OPC2_32_RR1_MULR_H_16_LL
:
5259 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5260 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5262 case OPC2_32_RR1_MULR_H_16_LU
:
5263 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5264 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5266 case OPC2_32_RR1_MULR_H_16_UL
:
5267 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5268 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5270 case OPC2_32_RR1_MULR_H_16_UU
:
5271 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5272 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5278 static void decode_rr1_mulq(CPUTriCoreState
*env
, DisasContext
*ctx
)
5286 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
5287 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
5288 r3
= MASK_OP_RR1_D(ctx
->opcode
);
5289 n
= MASK_OP_RR1_N(ctx
->opcode
);
5290 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
5292 temp
= tcg_temp_new();
5293 temp2
= tcg_temp_new();
5296 case OPC2_32_RR1_MUL_Q_32
:
5297 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
5299 case OPC2_32_RR1_MUL_Q_64
:
5300 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
5303 case OPC2_32_RR1_MUL_Q_32_L
:
5304 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
5305 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
5307 case OPC2_32_RR1_MUL_Q_64_L
:
5308 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
5309 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
5311 case OPC2_32_RR1_MUL_Q_32_U
:
5312 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
5313 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
5315 case OPC2_32_RR1_MUL_Q_64_U
:
5316 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
5317 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
5319 case OPC2_32_RR1_MUL_Q_32_LL
:
5320 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
5321 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
5322 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
5324 case OPC2_32_RR1_MUL_Q_32_UU
:
5325 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
5326 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
5327 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
5329 case OPC2_32_RR1_MULR_Q_32_L
:
5330 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
5331 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
5332 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
5334 case OPC2_32_RR1_MULR_Q_32_U
:
5335 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
5336 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
5337 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
5340 tcg_temp_free(temp
);
5341 tcg_temp_free(temp2
);
5345 static void decode_rr2_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
5350 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
5351 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
5352 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
5353 r3
= MASK_OP_RR2_D(ctx
->opcode
);
5355 case OPC2_32_RR2_MUL_32
:
5356 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5358 case OPC2_32_RR2_MUL_64
:
5359 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
5362 case OPC2_32_RR2_MULS_32
:
5363 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5366 case OPC2_32_RR2_MUL_U_64
:
5367 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
5370 case OPC2_32_RR2_MULS_U_32
:
5371 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5378 static void decode_rrpw_extract_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
5384 op2
= MASK_OP_RRPW_OP2(ctx
->opcode
);
5385 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
5386 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
5387 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
5388 pos
= MASK_OP_RRPW_POS(ctx
->opcode
);
5389 width
= MASK_OP_RRPW_WIDTH(ctx
->opcode
);
5392 case OPC2_32_RRPW_EXTR
:
5393 if (pos
+ width
<= 31) {
5394 /* optimize special cases */
5395 if ((pos
== 0) && (width
== 8)) {
5396 tcg_gen_ext8s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5397 } else if ((pos
== 0) && (width
== 16)) {
5398 tcg_gen_ext16s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5400 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 32 - pos
- width
);
5401 tcg_gen_sari_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 32 - width
);
5405 case OPC2_32_RRPW_EXTR_U
:
5407 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
5409 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], pos
);
5410 tcg_gen_andi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], ~0u >> (32-width
));
5413 case OPC2_32_RRPW_IMASK
:
5414 if (pos
+ width
<= 31) {
5415 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], ((1u << width
) - 1) << pos
);
5416 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], pos
);
5419 case OPC2_32_RRPW_INSERT
:
5420 if (pos
+ width
<= 31) {
5421 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
5429 static void decode_rrr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
5435 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
5436 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
5437 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
5438 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
5439 r4
= MASK_OP_RRR_D(ctx
->opcode
);
5442 case OPC2_32_RRR_CADD
:
5443 gen_cond_add(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
5444 cpu_gpr_d
[r4
], cpu_gpr_d
[r3
]);
5446 case OPC2_32_RRR_CADDN
:
5447 gen_cond_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
5450 case OPC2_32_RRR_CSUB
:
5451 gen_cond_sub(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
5454 case OPC2_32_RRR_CSUBN
:
5455 gen_cond_sub(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
5458 case OPC2_32_RRR_SEL
:
5459 temp
= tcg_const_i32(0);
5460 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5461 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5462 tcg_temp_free(temp
);
5464 case OPC2_32_RRR_SELN
:
5465 temp
= tcg_const_i32(0);
5466 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5467 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5468 tcg_temp_free(temp
);
5473 static void decode_rrr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
5479 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
5480 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
5481 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
5482 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
5483 r4
= MASK_OP_RRR_D(ctx
->opcode
);
5486 case OPC2_32_RRR_DVADJ
:
5487 GEN_HELPER_RRR(dvadj
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5488 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5490 case OPC2_32_RRR_DVSTEP
:
5491 GEN_HELPER_RRR(dvstep
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5492 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5494 case OPC2_32_RRR_DVSTEP_U
:
5495 GEN_HELPER_RRR(dvstep_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5496 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5498 case OPC2_32_RRR_IXMAX
:
5499 GEN_HELPER_RRR(ixmax
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5500 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5502 case OPC2_32_RRR_IXMAX_U
:
5503 GEN_HELPER_RRR(ixmax_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5504 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5506 case OPC2_32_RRR_IXMIN
:
5507 GEN_HELPER_RRR(ixmin
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5508 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5510 case OPC2_32_RRR_IXMIN_U
:
5511 GEN_HELPER_RRR(ixmin_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5512 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5514 case OPC2_32_RRR_PACK
:
5515 gen_helper_pack(cpu_gpr_d
[r4
], cpu_PSW_C
, cpu_gpr_d
[r3
],
5516 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
5522 static void decode_rrr2_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
5525 uint32_t r1
, r2
, r3
, r4
;
5527 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
5528 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
5529 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
5530 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
5531 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
5533 case OPC2_32_RRR2_MADD_32
:
5534 gen_madd32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
5537 case OPC2_32_RRR2_MADD_64
:
5538 gen_madd64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5539 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5541 case OPC2_32_RRR2_MADDS_32
:
5542 gen_helper_madd32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
5543 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5545 case OPC2_32_RRR2_MADDS_64
:
5546 gen_madds_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5547 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5549 case OPC2_32_RRR2_MADD_U_64
:
5550 gen_maddu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5551 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5553 case OPC2_32_RRR2_MADDS_U_32
:
5554 gen_helper_madd32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
5555 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5557 case OPC2_32_RRR2_MADDS_U_64
:
5558 gen_maddsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5559 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5564 static void decode_rrr2_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
5567 uint32_t r1
, r2
, r3
, r4
;
5569 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
5570 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
5571 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
5572 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
5573 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
5576 case OPC2_32_RRR2_MSUB_32
:
5577 gen_msub32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
5580 case OPC2_32_RRR2_MSUB_64
:
5581 gen_msub64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5582 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5584 case OPC2_32_RRR2_MSUBS_32
:
5585 gen_helper_msub32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
5586 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5588 case OPC2_32_RRR2_MSUBS_64
:
5589 gen_msubs_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5590 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5592 case OPC2_32_RRR2_MSUB_U_64
:
5593 gen_msubu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5594 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5596 case OPC2_32_RRR2_MSUBS_U_32
:
5597 gen_helper_msub32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
5598 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5600 case OPC2_32_RRR2_MSUBS_U_64
:
5601 gen_msubsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5602 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
5608 static void decode_rrr1_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
5611 uint32_t r1
, r2
, r3
, r4
, n
;
5613 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
5614 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
5615 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
5616 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
5617 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
5618 n
= MASK_OP_RRR1_N(ctx
->opcode
);
5621 case OPC2_32_RRR1_MADD_H_LL
:
5622 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5623 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
5625 case OPC2_32_RRR1_MADD_H_LU
:
5626 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5627 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
5629 case OPC2_32_RRR1_MADD_H_UL
:
5630 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5631 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
5633 case OPC2_32_RRR1_MADD_H_UU
:
5634 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5635 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
5637 case OPC2_32_RRR1_MADDS_H_LL
:
5638 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5639 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
5641 case OPC2_32_RRR1_MADDS_H_LU
:
5642 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5643 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
5645 case OPC2_32_RRR1_MADDS_H_UL
:
5646 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5647 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
5649 case OPC2_32_RRR1_MADDS_H_UU
:
5650 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5651 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
5653 case OPC2_32_RRR1_MADDM_H_LL
:
5654 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5655 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
5657 case OPC2_32_RRR1_MADDM_H_LU
:
5658 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5659 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
5661 case OPC2_32_RRR1_MADDM_H_UL
:
5662 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5663 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
5665 case OPC2_32_RRR1_MADDM_H_UU
:
5666 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5667 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
5669 case OPC2_32_RRR1_MADDMS_H_LL
:
5670 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5671 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
5673 case OPC2_32_RRR1_MADDMS_H_LU
:
5674 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5675 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
5677 case OPC2_32_RRR1_MADDMS_H_UL
:
5678 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5679 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
5681 case OPC2_32_RRR1_MADDMS_H_UU
:
5682 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
5683 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
5685 case OPC2_32_RRR1_MADDR_H_LL
:
5686 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5687 cpu_gpr_d
[r2
], n
, MODE_LL
);
5689 case OPC2_32_RRR1_MADDR_H_LU
:
5690 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5691 cpu_gpr_d
[r2
], n
, MODE_LU
);
5693 case OPC2_32_RRR1_MADDR_H_UL
:
5694 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5695 cpu_gpr_d
[r2
], n
, MODE_UL
);
5697 case OPC2_32_RRR1_MADDR_H_UU
:
5698 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5699 cpu_gpr_d
[r2
], n
, MODE_UU
);
5701 case OPC2_32_RRR1_MADDRS_H_LL
:
5702 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5703 cpu_gpr_d
[r2
], n
, MODE_LL
);
5705 case OPC2_32_RRR1_MADDRS_H_LU
:
5706 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5707 cpu_gpr_d
[r2
], n
, MODE_LU
);
5709 case OPC2_32_RRR1_MADDRS_H_UL
:
5710 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5711 cpu_gpr_d
[r2
], n
, MODE_UL
);
5713 case OPC2_32_RRR1_MADDRS_H_UU
:
5714 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5715 cpu_gpr_d
[r2
], n
, MODE_UU
);
5720 static void decode_32Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
5724 int32_t address
, const16
;
5727 TCGv temp
, temp2
, temp3
;
5729 op1
= MASK_OP_MAJOR(ctx
->opcode
);
5731 /* handle JNZ.T opcode only being 7 bit long */
5732 if (unlikely((op1
& 0x7f) == OPCM_32_BRN_JTT
)) {
5733 op1
= OPCM_32_BRN_JTT
;
5738 case OPCM_32_ABS_LDW
:
5739 decode_abs_ldw(env
, ctx
);
5741 case OPCM_32_ABS_LDB
:
5742 decode_abs_ldb(env
, ctx
);
5744 case OPCM_32_ABS_LDMST_SWAP
:
5745 decode_abs_ldst_swap(env
, ctx
);
5747 case OPCM_32_ABS_LDST_CONTEXT
:
5748 decode_abs_ldst_context(env
, ctx
);
5750 case OPCM_32_ABS_STORE
:
5751 decode_abs_store(env
, ctx
);
5753 case OPCM_32_ABS_STOREB_H
:
5754 decode_abs_storeb_h(env
, ctx
);
5756 case OPC1_32_ABS_STOREQ
:
5757 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
5758 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
5759 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
5760 temp2
= tcg_temp_new();
5762 tcg_gen_shri_tl(temp2
, cpu_gpr_d
[r1
], 16);
5763 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_LEUW
);
5765 tcg_temp_free(temp2
);
5766 tcg_temp_free(temp
);
5768 case OPC1_32_ABS_LD_Q
:
5769 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
5770 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
5771 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
5773 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
5774 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
5776 tcg_temp_free(temp
);
5778 case OPC1_32_ABS_LEA
:
5779 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
5780 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
5781 tcg_gen_movi_tl(cpu_gpr_a
[r1
], EA_ABS_FORMAT(address
));
5784 case OPC1_32_ABSB_ST_T
:
5785 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
5786 b
= MASK_OP_ABSB_B(ctx
->opcode
);
5787 bpos
= MASK_OP_ABSB_BPOS(ctx
->opcode
);
5789 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
5790 temp2
= tcg_temp_new();
5792 tcg_gen_qemu_ld_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
5793 tcg_gen_andi_tl(temp2
, temp2
, ~(0x1u
<< bpos
));
5794 tcg_gen_ori_tl(temp2
, temp2
, (b
<< bpos
));
5795 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
5797 tcg_temp_free(temp
);
5798 tcg_temp_free(temp2
);
5801 case OPC1_32_B_CALL
:
5802 case OPC1_32_B_CALLA
:
5807 address
= MASK_OP_B_DISP24_SEXT(ctx
->opcode
);
5808 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
5811 case OPCM_32_BIT_ANDACC
:
5812 decode_bit_andacc(env
, ctx
);
5814 case OPCM_32_BIT_LOGICAL_T1
:
5815 decode_bit_logical_t(env
, ctx
);
5817 case OPCM_32_BIT_INSERT
:
5818 decode_bit_insert(env
, ctx
);
5820 case OPCM_32_BIT_LOGICAL_T2
:
5821 decode_bit_logical_t2(env
, ctx
);
5823 case OPCM_32_BIT_ORAND
:
5824 decode_bit_orand(env
, ctx
);
5826 case OPCM_32_BIT_SH_LOGIC1
:
5827 decode_bit_sh_logic1(env
, ctx
);
5829 case OPCM_32_BIT_SH_LOGIC2
:
5830 decode_bit_sh_logic2(env
, ctx
);
5833 case OPCM_32_BO_ADDRMODE_POST_PRE_BASE
:
5834 decode_bo_addrmode_post_pre_base(env
, ctx
);
5836 case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR
:
5837 decode_bo_addrmode_bitreverse_circular(env
, ctx
);
5839 case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE
:
5840 decode_bo_addrmode_ld_post_pre_base(env
, ctx
);
5842 case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR
:
5843 decode_bo_addrmode_ld_bitreverse_circular(env
, ctx
);
5845 case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE
:
5846 decode_bo_addrmode_stctx_post_pre_base(env
, ctx
);
5848 case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR
:
5849 decode_bo_addrmode_ldmst_bitreverse_circular(env
, ctx
);
5852 case OPC1_32_BOL_LD_A_LONGOFF
:
5853 case OPC1_32_BOL_LD_W_LONGOFF
:
5854 case OPC1_32_BOL_LEA_LONGOFF
:
5855 case OPC1_32_BOL_ST_W_LONGOFF
:
5856 case OPC1_32_BOL_ST_A_LONGOFF
:
5857 case OPC1_32_BOL_LD_B_LONGOFF
:
5858 case OPC1_32_BOL_LD_BU_LONGOFF
:
5859 case OPC1_32_BOL_LD_H_LONGOFF
:
5860 case OPC1_32_BOL_LD_HU_LONGOFF
:
5861 case OPC1_32_BOL_ST_B_LONGOFF
:
5862 case OPC1_32_BOL_ST_H_LONGOFF
:
5863 decode_bol_opc(env
, ctx
, op1
);
5866 case OPCM_32_BRC_EQ_NEQ
:
5867 case OPCM_32_BRC_GE
:
5868 case OPCM_32_BRC_JLT
:
5869 case OPCM_32_BRC_JNE
:
5870 const4
= MASK_OP_BRC_CONST4_SEXT(ctx
->opcode
);
5871 address
= MASK_OP_BRC_DISP15_SEXT(ctx
->opcode
);
5872 r1
= MASK_OP_BRC_S1(ctx
->opcode
);
5873 gen_compute_branch(ctx
, op1
, r1
, 0, const4
, address
);
5876 case OPCM_32_BRN_JTT
:
5877 address
= MASK_OP_BRN_DISP15_SEXT(ctx
->opcode
);
5878 r1
= MASK_OP_BRN_S1(ctx
->opcode
);
5879 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
5882 case OPCM_32_BRR_EQ_NEQ
:
5883 case OPCM_32_BRR_ADDR_EQ_NEQ
:
5884 case OPCM_32_BRR_GE
:
5885 case OPCM_32_BRR_JLT
:
5886 case OPCM_32_BRR_JNE
:
5887 case OPCM_32_BRR_JNZ
:
5888 case OPCM_32_BRR_LOOP
:
5889 address
= MASK_OP_BRR_DISP15_SEXT(ctx
->opcode
);
5890 r2
= MASK_OP_BRR_S2(ctx
->opcode
);
5891 r1
= MASK_OP_BRR_S1(ctx
->opcode
);
5892 gen_compute_branch(ctx
, op1
, r1
, r2
, 0, address
);
5895 case OPCM_32_RC_LOGICAL_SHIFT
:
5896 decode_rc_logical_shift(env
, ctx
);
5898 case OPCM_32_RC_ACCUMULATOR
:
5899 decode_rc_accumulator(env
, ctx
);
5901 case OPCM_32_RC_SERVICEROUTINE
:
5902 decode_rc_serviceroutine(env
, ctx
);
5904 case OPCM_32_RC_MUL
:
5905 decode_rc_mul(env
, ctx
);
5908 case OPCM_32_RCPW_MASK_INSERT
:
5909 decode_rcpw_insert(env
, ctx
);
5912 case OPC1_32_RCRR_INSERT
:
5913 r1
= MASK_OP_RCRR_S1(ctx
->opcode
);
5914 r2
= MASK_OP_RCRR_S3(ctx
->opcode
);
5915 r3
= MASK_OP_RCRR_D(ctx
->opcode
);
5916 const16
= MASK_OP_RCRR_CONST4(ctx
->opcode
);
5917 temp
= tcg_const_i32(const16
);
5918 temp2
= tcg_temp_new(); /* width*/
5919 temp3
= tcg_temp_new(); /* pos */
5921 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
+1], 0x1f);
5922 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
5924 gen_insert(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, temp2
, temp3
);
5926 tcg_temp_free(temp
);
5927 tcg_temp_free(temp2
);
5928 tcg_temp_free(temp3
);
5931 case OPCM_32_RCRW_MASK_INSERT
:
5932 decode_rcrw_insert(env
, ctx
);
5935 case OPCM_32_RCR_COND_SELECT
:
5936 decode_rcr_cond_select(env
, ctx
);
5938 case OPCM_32_RCR_MADD
:
5939 decode_rcr_madd(env
, ctx
);
5941 case OPCM_32_RCR_MSUB
:
5942 decode_rcr_msub(env
, ctx
);
5945 case OPC1_32_RLC_ADDI
:
5946 case OPC1_32_RLC_ADDIH
:
5947 case OPC1_32_RLC_ADDIH_A
:
5948 case OPC1_32_RLC_MFCR
:
5949 case OPC1_32_RLC_MOV
:
5950 case OPC1_32_RLC_MOV_64
:
5951 case OPC1_32_RLC_MOV_U
:
5952 case OPC1_32_RLC_MOV_H
:
5953 case OPC1_32_RLC_MOVH_A
:
5954 case OPC1_32_RLC_MTCR
:
5955 decode_rlc_opc(env
, ctx
, op1
);
5958 case OPCM_32_RR_ACCUMULATOR
:
5959 decode_rr_accumulator(env
, ctx
);
5961 case OPCM_32_RR_LOGICAL_SHIFT
:
5962 decode_rr_logical_shift(env
, ctx
);
5964 case OPCM_32_RR_ADDRESS
:
5965 decode_rr_address(env
, ctx
);
5967 case OPCM_32_RR_IDIRECT
:
5968 decode_rr_idirect(env
, ctx
);
5970 case OPCM_32_RR_DIVIDE
:
5971 decode_rr_divide(env
, ctx
);
5974 case OPCM_32_RR1_MUL
:
5975 decode_rr1_mul(env
, ctx
);
5977 case OPCM_32_RR1_MULQ
:
5978 decode_rr1_mulq(env
, ctx
);
5981 case OPCM_32_RR2_MUL
:
5982 decode_rr2_mul(env
, ctx
);
5985 case OPCM_32_RRPW_EXTRACT_INSERT
:
5986 decode_rrpw_extract_insert(env
, ctx
);
5988 case OPC1_32_RRPW_DEXTR
:
5989 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
5990 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
5991 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
5992 const16
= MASK_OP_RRPW_POS(ctx
->opcode
);
5994 tcg_gen_rotli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], const16
);
5996 temp
= tcg_temp_new();
5997 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], const16
);
5998 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 32 - const16
);
5999 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
6000 tcg_temp_free(temp
);
6004 case OPCM_32_RRR_COND_SELECT
:
6005 decode_rrr_cond_select(env
, ctx
);
6007 case OPCM_32_RRR_DIVIDE
:
6008 decode_rrr_divide(env
, ctx
);
6010 case OPCM_32_RRR2_MADD
:
6011 decode_rrr2_madd(env
, ctx
);
6013 case OPCM_32_RRR2_MSUB
:
6014 decode_rrr2_msub(env
, ctx
);
6017 case OPCM_32_RRR1_MADD
:
6018 decode_rrr1_madd(env
, ctx
);
6023 static void decode_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int *is_branch
)
6025 /* 16-Bit Instruction */
6026 if ((ctx
->opcode
& 0x1) == 0) {
6027 ctx
->next_pc
= ctx
->pc
+ 2;
6028 decode_16Bit_opc(env
, ctx
);
6029 /* 32-Bit Instruction */
6031 ctx
->next_pc
= ctx
->pc
+ 4;
6032 decode_32Bit_opc(env
, ctx
);
/*
 * Core translation loop: decode guest instructions starting at the TB's
 * start PC until a decoder ends the block, emitting TCG ops as it goes.
 * NOTE(review): several original lines are elided in this chunk (context
 * and pc_start setup, num_insns counting, TB-exit emission, #ifdef
 * guards); the comments below describe only the visible statements.
 */
gen_intermediate_code_internal(TriCoreCPU
*cpu
, struct TranslationBlock
*tb
,
/* Resolve the CPUState and architectural env for the vCPU. */
6040 CPUState
*cs
= CPU(cpu
);
6041 CPUTriCoreState
*env
= &cpu
->env
;
6043 target_ulong pc_start
;
6047 qemu_log("search pc %d\n", search_pc
);
/* Seed the per-block DisasContext from CPU/TB state. */
6055 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
6056 ctx
.bstate
= BS_NONE
;
6057 ctx
.mem_idx
= cpu_mmu_index(env
);
/* Arm TCG temporary-leak accounting for this block (checked below). */
6059 tcg_clear_temp_count();
/* Main decode loop: runs until a decoder marks the block finished. */
6061 while (ctx
.bstate
== BS_NONE
) {
6062 ctx
.opcode
= cpu_ldl_code(env
, ctx
.pc
);
6063 decode_opc(env
, &ctx
, 0);
/* Stop early when the TCG op buffer is close to overflowing. */
6067 if (tcg_op_buf_full()) {
6068 gen_save_pc(ctx
.next_pc
);
6073 gen_save_pc(ctx
.next_pc
);
/* Advance to the next instruction computed by decode_opc(). */
6077 ctx
.pc
= ctx
.next_pc
;
6080 gen_tb_end(tb
, num_insns
);
6082 printf("done_generating search pc\n");
/* Record final size and instruction count of the translated block. */
6084 tb
->size
= ctx
.pc
- pc_start
;
6085 tb
->icount
= num_insns
;
/* Report any TCG temporaries leaked by the decoders. */
6087 if (tcg_check_temp_count()) {
6088 printf("LEAK at %08x\n", env
->PC
);
/* Optionally disassemble the translated guest code into the log. */
6092 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
6093 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
6094 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
6101 gen_intermediate_code(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
6103 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, false);
6107 gen_intermediate_code_pc(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
6109 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, true);
6113 restore_state_to_opc(CPUTriCoreState
*env
, TranslationBlock
*tb
, int pc_pos
)
6115 env
->PC
= tcg_ctx
.gen_opc_pc
[pc_pos
];
/*
 * Architectural reset of the CPU state.
 * NOTE(review): the body is not fully visible in this chunk — the
 * register reset values are assigned on lines elided here; confirm
 * against the full file before relying on specifics.
 */
void cpu_state_reset(CPUTriCoreState
*env
)
6125 /* Reset Regs to Default Value */
6129 static void tricore_tcg_init_csfr(void)
6131 cpu_PCXI
= tcg_global_mem_new(TCG_AREG0
,
6132 offsetof(CPUTriCoreState
, PCXI
), "PCXI");
6133 cpu_PSW
= tcg_global_mem_new(TCG_AREG0
,
6134 offsetof(CPUTriCoreState
, PSW
), "PSW");
6135 cpu_PC
= tcg_global_mem_new(TCG_AREG0
,
6136 offsetof(CPUTriCoreState
, PC
), "PC");
6137 cpu_ICR
= tcg_global_mem_new(TCG_AREG0
,
6138 offsetof(CPUTriCoreState
, ICR
), "ICR");
6141 void tricore_tcg_init(void)
6148 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
6150 for (i
= 0 ; i
< 16 ; i
++) {
6151 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
6152 offsetof(CPUTriCoreState
, gpr_a
[i
]),
6155 for (i
= 0 ; i
< 16 ; i
++) {
6156 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
6157 offsetof(CPUTriCoreState
, gpr_d
[i
]),
6160 tricore_tcg_init_csfr();
6161 /* init PSW flag cache */
6162 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
6163 offsetof(CPUTriCoreState
, PSW_USB_C
),
6165 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
6166 offsetof(CPUTriCoreState
, PSW_USB_V
),
6168 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
6169 offsetof(CPUTriCoreState
, PSW_USB_SV
),
6171 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
6172 offsetof(CPUTriCoreState
, PSW_USB_AV
),
6174 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
6175 offsetof(CPUTriCoreState
, PSW_USB_SAV
),