2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
39 static TCGv cpu_gpr_a
[16];
40 static TCGv cpu_gpr_d
[16];
42 static TCGv cpu_PSW_C
;
43 static TCGv cpu_PSW_V
;
44 static TCGv cpu_PSW_SV
;
45 static TCGv cpu_PSW_AV
;
46 static TCGv cpu_PSW_SAV
;
48 static TCGv_ptr cpu_env
;
50 #include "exec/gen-icount.h"
/* Names used when allocating the address-register TCG globals (a10 is the
   stack pointer, hence "sp").  The extracted text lost the array's closing
   brace; restored here. */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };
/* Names used when allocating the data-register TCG globals.  The extracted
   text lost the array's closing brace; restored here. */
static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
64 typedef struct DisasContext
{
65 struct TranslationBlock
*tb
;
66 target_ulong pc
, saved_pc
, next_pc
;
68 int singlestep_enabled
;
69 /* Routine used to access memory */
71 uint32_t hflags
, saved_hflags
;
90 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
91 fprintf_function cpu_fprintf
, int flags
)
93 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
94 CPUTriCoreState
*env
= &cpu
->env
;
100 cpu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
101 cpu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
102 cpu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
103 cpu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
104 cpu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
105 cpu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
107 for (i
= 0; i
< 16; ++i
) {
109 cpu_fprintf(f
, "\nGPR A%02d:", i
);
111 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
113 for (i
= 0; i
< 16; ++i
) {
115 cpu_fprintf(f
, "\nGPR D%02d:", i
);
117 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
119 cpu_fprintf(f
, "\n");
 * Functions to generate micro-ops
 */
/* Macros for generating helpers */
/* Call helper "name" with a single immediate argument: materialize the
   constant into a temporary, pass it (plus cpu_env) and free it again.
   The do { } while (0) wrapper makes the macro statement-safe; the
   terminator was lost in extraction and is restored here. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)
/* Split arg0 into its high (arg00) and low (arg01) sign-extended 16-bit
   halves and pair each with the LOW half of arg1 (arg11 passed twice),
   then call the 4-operand halfword helper.  The do/while terminator was
   lost in extraction and is restored here. */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
    } while (0)
147 #define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \
148 TCGv arg00 = tcg_temp_new(); \
149 TCGv arg01 = tcg_temp_new(); \
150 TCGv arg10 = tcg_temp_new(); \
151 TCGv arg11 = tcg_temp_new(); \
152 tcg_gen_sari_tl(arg00, arg0, 16); \
153 tcg_gen_ext16s_tl(arg01, arg0); \
154 tcg_gen_sari_tl(arg11, arg1, 16); \
155 tcg_gen_ext16s_tl(arg10, arg1); \
156 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
157 tcg_temp_free(arg00); \
158 tcg_temp_free(arg01); \
159 tcg_temp_free(arg10); \
160 tcg_temp_free(arg11); \
163 #define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \
164 TCGv arg00 = tcg_temp_new(); \
165 TCGv arg01 = tcg_temp_new(); \
166 TCGv arg10 = tcg_temp_new(); \
167 TCGv arg11 = tcg_temp_new(); \
168 tcg_gen_sari_tl(arg00, arg0, 16); \
169 tcg_gen_ext16s_tl(arg01, arg0); \
170 tcg_gen_sari_tl(arg10, arg1, 16); \
171 tcg_gen_ext16s_tl(arg11, arg1); \
172 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
173 tcg_temp_free(arg00); \
174 tcg_temp_free(arg01); \
175 tcg_temp_free(arg10); \
176 tcg_temp_free(arg11); \
/* Split arg0 into low (arg00) / high (arg01) sign-extended halves and pair
   each with the HIGH half of arg1 (arg11 passed twice), then call the
   4-operand halfword helper.  The do/while terminator was lost in
   extraction and is restored here. */
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
    } while (0)
192 #define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do { \
193 TCGv_i64 ret = tcg_temp_new_i64(); \
194 TCGv_i64 arg1 = tcg_temp_new_i64(); \
196 tcg_gen_concat_i32_i64(arg1, al1, ah1); \
197 gen_helper_##name(ret, arg1, arg2); \
198 tcg_gen_extr_i64_i32(rl, rh, ret); \
200 tcg_temp_free_i64(ret); \
201 tcg_temp_free_i64(arg1); \
204 #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
205 #define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
206 ((offset & 0x0fffff) << 1))
208 /* Functions for load/save to/from memory */
210 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
211 int16_t con
, TCGMemOp mop
)
213 TCGv temp
= tcg_temp_new();
214 tcg_gen_addi_tl(temp
, r2
, con
);
215 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
219 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
220 int16_t con
, TCGMemOp mop
)
222 TCGv temp
= tcg_temp_new();
223 tcg_gen_addi_tl(temp
, r2
, con
);
224 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
228 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
230 TCGv_i64 temp
= tcg_temp_new_i64();
232 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
233 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
235 tcg_temp_free_i64(temp
);
238 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
241 TCGv temp
= tcg_temp_new();
242 tcg_gen_addi_tl(temp
, base
, con
);
243 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
247 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
249 TCGv_i64 temp
= tcg_temp_new_i64();
251 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
252 /* write back to two 32 bit regs */
253 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
255 tcg_temp_free_i64(temp
);
258 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
261 TCGv temp
= tcg_temp_new();
262 tcg_gen_addi_tl(temp
, base
, con
);
263 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
267 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
270 TCGv temp
= tcg_temp_new();
271 tcg_gen_addi_tl(temp
, r2
, off
);
272 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
273 tcg_gen_mov_tl(r2
, temp
);
277 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
280 TCGv temp
= tcg_temp_new();
281 tcg_gen_addi_tl(temp
, r2
, off
);
282 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
283 tcg_gen_mov_tl(r2
, temp
);
287 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
288 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
290 TCGv temp
= tcg_temp_new();
291 TCGv temp2
= tcg_temp_new();
293 /* temp = (M(EA, word) */
294 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
295 /* temp = temp & ~E[a][63:32]) */
296 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
297 /* temp2 = (E[a][31:0] & E[a][63:32]); */
298 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
299 /* temp = temp | temp2; */
300 tcg_gen_or_tl(temp
, temp
, temp2
);
301 /* M(EA, word) = temp; */
302 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
305 tcg_temp_free(temp2
);
308 /* tmp = M(EA, word);
311 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
313 TCGv temp
= tcg_temp_new();
315 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
316 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
317 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
/* We generate loads and stores to core special function registers (CSFRs)
   through the functions gen_mfcr and gen_mtcr. To handle access permissions,
   we use the three macros R, A and E, which allow read-only, all-access and
   ENDINIT-protected access respectively.
   These macros also specify in which ISA version the CSFR was introduced. */
326 #define R(ADDRESS, REG, FEATURE) \
328 if (tricore_feature(env, FEATURE)) { \
329 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
332 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
333 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
334 static inline void gen_mfcr(CPUTriCoreState
*env
, TCGv ret
, int32_t offset
)
336 /* since we're caching PSW make this a special case */
337 if (offset
== 0xfe04) {
338 gen_helper_psw_read(ret
, cpu_env
);
349 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
350 since no execption occurs */
351 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
353 if (tricore_feature(env, FEATURE)) { \
354 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
357 /* Endinit protected registers
358 TODO: Since the endinit bit is in a register of a not yet implemented
359 watchdog device, we handle endinit protected registers like
360 all-access registers for now. */
361 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
362 static inline void gen_mtcr(CPUTriCoreState
*env
, DisasContext
*ctx
, TCGv r1
,
365 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
366 /* since we're caching PSW make this a special case */
367 if (offset
== 0xfe04) {
368 gen_helper_psw_write(cpu_env
, r1
);
375 /* generate privilege trap */
379 /* Functions for arithmetic instructions */
381 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
383 TCGv t0
= tcg_temp_new_i32();
384 TCGv result
= tcg_temp_new_i32();
385 /* Addition and set V/SV bits */
386 tcg_gen_add_tl(result
, r1
, r2
);
388 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
389 tcg_gen_xor_tl(t0
, r1
, r2
);
390 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
392 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
393 /* Calc AV/SAV bits */
394 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
395 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
397 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
398 /* write back result */
399 tcg_gen_mov_tl(ret
, result
);
401 tcg_temp_free(result
);
406 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
408 TCGv temp
= tcg_temp_new();
409 TCGv_i64 t0
= tcg_temp_new_i64();
410 TCGv_i64 t1
= tcg_temp_new_i64();
411 TCGv_i64 result
= tcg_temp_new_i64();
413 tcg_gen_add_i64(result
, r1
, r2
);
415 tcg_gen_xor_i64(t1
, result
, r1
);
416 tcg_gen_xor_i64(t0
, r1
, r2
);
417 tcg_gen_andc_i64(t1
, t1
, t0
);
418 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t1
, 32);
420 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
421 /* calc AV/SAV bits */
422 tcg_gen_trunc_shr_i64_i32(temp
, result
, 32);
423 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
424 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
426 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
427 /* write back result */
428 tcg_gen_mov_i64(ret
, result
);
431 tcg_temp_free_i64(result
);
432 tcg_temp_free_i64(t0
);
433 tcg_temp_free_i64(t1
);
437 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
438 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
439 void(*op2
)(TCGv
, TCGv
, TCGv
))
441 TCGv temp
= tcg_temp_new();
442 TCGv temp2
= tcg_temp_new();
443 TCGv temp3
= tcg_temp_new();
444 TCGv temp4
= tcg_temp_new();
446 (*op1
)(temp
, r1_low
, r2
);
448 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
449 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
450 if (op1
== tcg_gen_add_tl
) {
451 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
453 tcg_gen_and_tl(temp2
, temp2
, temp3
);
456 (*op2
)(temp3
, r1_high
, r3
);
458 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
459 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
460 if (op2
== tcg_gen_add_tl
) {
461 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
463 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
465 /* combine V0/V1 bits */
466 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
468 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
470 tcg_gen_mov_tl(ret_low
, temp
);
471 tcg_gen_mov_tl(ret_high
, temp3
);
473 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
474 tcg_gen_xor_tl(temp
, temp
, ret_low
);
475 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
476 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
477 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
479 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
482 tcg_temp_free(temp2
);
483 tcg_temp_free(temp3
);
484 tcg_temp_free(temp4
);
487 /* ret = r2 + (r1 * r3); */
488 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
490 TCGv_i64 t1
= tcg_temp_new_i64();
491 TCGv_i64 t2
= tcg_temp_new_i64();
492 TCGv_i64 t3
= tcg_temp_new_i64();
494 tcg_gen_ext_i32_i64(t1
, r1
);
495 tcg_gen_ext_i32_i64(t2
, r2
);
496 tcg_gen_ext_i32_i64(t3
, r3
);
498 tcg_gen_mul_i64(t1
, t1
, t3
);
499 tcg_gen_add_i64(t1
, t2
, t1
);
501 tcg_gen_trunc_i64_i32(ret
, t1
);
504 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
505 /* t1 < -0x80000000 */
506 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
507 tcg_gen_or_i64(t2
, t2
, t3
);
508 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
509 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
511 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
512 /* Calc AV/SAV bits */
513 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
514 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
516 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
518 tcg_temp_free_i64(t1
);
519 tcg_temp_free_i64(t2
);
520 tcg_temp_free_i64(t3
);
523 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
525 TCGv temp
= tcg_const_i32(con
);
526 gen_madd32_d(ret
, r1
, r2
, temp
);
531 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
534 TCGv t1
= tcg_temp_new();
535 TCGv t2
= tcg_temp_new();
536 TCGv t3
= tcg_temp_new();
537 TCGv t4
= tcg_temp_new();
539 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
540 /* only the add can overflow */
541 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
543 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
544 tcg_gen_xor_tl(t1
, r2_high
, t2
);
545 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
547 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
548 /* Calc AV/SAV bits */
549 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
550 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
552 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
553 /* write back the result */
554 tcg_gen_mov_tl(ret_low
, t3
);
555 tcg_gen_mov_tl(ret_high
, t4
);
564 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
567 TCGv_i64 t1
= tcg_temp_new_i64();
568 TCGv_i64 t2
= tcg_temp_new_i64();
569 TCGv_i64 t3
= tcg_temp_new_i64();
571 tcg_gen_extu_i32_i64(t1
, r1
);
572 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
573 tcg_gen_extu_i32_i64(t3
, r3
);
575 tcg_gen_mul_i64(t1
, t1
, t3
);
576 tcg_gen_add_i64(t2
, t2
, t1
);
577 /* write back result */
578 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
579 /* only the add overflows, if t2 < t1
581 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
582 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
583 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
585 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
586 /* Calc AV/SAV bits */
587 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
588 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
590 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
592 tcg_temp_free_i64(t1
);
593 tcg_temp_free_i64(t2
);
594 tcg_temp_free_i64(t3
);
598 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
601 TCGv temp
= tcg_const_i32(con
);
602 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
607 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
610 TCGv temp
= tcg_const_i32(con
);
611 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
616 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
617 TCGv r3
, uint32_t n
, uint32_t mode
)
619 TCGv temp
= tcg_const_i32(n
);
620 TCGv temp2
= tcg_temp_new();
621 TCGv_i64 temp64
= tcg_temp_new_i64();
624 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
627 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
630 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
633 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
636 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
637 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
638 tcg_gen_add_tl
, tcg_gen_add_tl
);
640 tcg_temp_free(temp2
);
641 tcg_temp_free_i64(temp64
);
645 gen_maddsu_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
646 TCGv r3
, uint32_t n
, uint32_t mode
)
648 TCGv temp
= tcg_const_i32(n
);
649 TCGv temp2
= tcg_temp_new();
650 TCGv_i64 temp64
= tcg_temp_new_i64();
653 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
656 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
659 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
662 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
665 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
666 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
667 tcg_gen_sub_tl
, tcg_gen_add_tl
);
669 tcg_temp_free(temp2
);
670 tcg_temp_free_i64(temp64
);
674 gen_maddsum_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
675 TCGv r3
, uint32_t n
, uint32_t mode
)
677 TCGv temp
= tcg_const_i32(n
);
678 TCGv_i64 temp64
= tcg_temp_new_i64();
679 TCGv_i64 temp64_2
= tcg_temp_new_i64();
680 TCGv_i64 temp64_3
= tcg_temp_new_i64();
683 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
686 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
689 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
692 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
695 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
696 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
697 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
698 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
699 tcg_gen_shli_i64(temp64
, temp64
, 16);
701 gen_add64_d(temp64_2
, temp64_3
, temp64
);
702 /* write back result */
703 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
706 tcg_temp_free_i64(temp64
);
707 tcg_temp_free_i64(temp64_2
);
708 tcg_temp_free_i64(temp64_3
);
711 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
);
714 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
715 TCGv r3
, uint32_t n
, uint32_t mode
)
717 TCGv temp
= tcg_const_i32(n
);
718 TCGv temp2
= tcg_temp_new();
719 TCGv temp3
= tcg_temp_new();
720 TCGv_i64 temp64
= tcg_temp_new_i64();
724 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
727 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
730 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
733 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
736 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
737 gen_adds(ret_low
, r1_low
, temp
);
738 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
739 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
740 gen_adds(ret_high
, r1_high
, temp2
);
742 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
743 /* combine av bits */
744 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
747 tcg_temp_free(temp2
);
748 tcg_temp_free(temp3
);
749 tcg_temp_free_i64(temp64
);
753 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
);
756 gen_maddsus_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
757 TCGv r3
, uint32_t n
, uint32_t mode
)
759 TCGv temp
= tcg_const_i32(n
);
760 TCGv temp2
= tcg_temp_new();
761 TCGv temp3
= tcg_temp_new();
762 TCGv_i64 temp64
= tcg_temp_new_i64();
766 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
769 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
772 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
775 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
778 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
779 gen_subs(ret_low
, r1_low
, temp
);
780 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
781 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
782 gen_adds(ret_high
, r1_high
, temp2
);
784 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
785 /* combine av bits */
786 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
789 tcg_temp_free(temp2
);
790 tcg_temp_free(temp3
);
791 tcg_temp_free_i64(temp64
);
796 gen_maddsums_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
797 TCGv r3
, uint32_t n
, uint32_t mode
)
799 TCGv temp
= tcg_const_i32(n
);
800 TCGv_i64 temp64
= tcg_temp_new_i64();
801 TCGv_i64 temp64_2
= tcg_temp_new_i64();
805 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
808 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
811 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
814 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
817 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
818 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
819 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
820 tcg_gen_shli_i64(temp64
, temp64
, 16);
821 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
823 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
824 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
827 tcg_temp_free_i64(temp64
);
828 tcg_temp_free_i64(temp64_2
);
833 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
834 TCGv r3
, uint32_t n
, uint32_t mode
)
836 TCGv temp
= tcg_const_i32(n
);
837 TCGv_i64 temp64
= tcg_temp_new_i64();
838 TCGv_i64 temp64_2
= tcg_temp_new_i64();
839 TCGv_i64 temp64_3
= tcg_temp_new_i64();
842 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
845 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
848 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
851 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
854 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
855 gen_add64_d(temp64_3
, temp64_2
, temp64
);
856 /* write back result */
857 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
860 tcg_temp_free_i64(temp64
);
861 tcg_temp_free_i64(temp64_2
);
862 tcg_temp_free_i64(temp64_3
);
866 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
867 TCGv r3
, uint32_t n
, uint32_t mode
)
869 TCGv temp
= tcg_const_i32(n
);
870 TCGv_i64 temp64
= tcg_temp_new_i64();
871 TCGv_i64 temp64_2
= tcg_temp_new_i64();
874 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
877 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
880 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
883 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
886 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
887 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
888 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
891 tcg_temp_free_i64(temp64
);
892 tcg_temp_free_i64(temp64_2
);
896 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
899 TCGv temp
= tcg_const_i32(n
);
900 TCGv_i64 temp64
= tcg_temp_new_i64();
903 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
906 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
909 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
912 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
915 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
918 tcg_temp_free_i64(temp64
);
922 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
924 TCGv temp
= tcg_temp_new();
925 TCGv temp2
= tcg_temp_new();
927 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
928 tcg_gen_shli_tl(temp
, r1
, 16);
929 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
932 tcg_temp_free(temp2
);
936 gen_maddsur32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
938 TCGv temp
= tcg_const_i32(n
);
939 TCGv temp2
= tcg_temp_new();
940 TCGv_i64 temp64
= tcg_temp_new_i64();
943 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
946 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
949 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
952 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
955 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
956 tcg_gen_shli_tl(temp
, r1
, 16);
957 gen_helper_addsur_h(ret
, cpu_env
, temp64
, temp
, temp2
);
960 tcg_temp_free(temp2
);
961 tcg_temp_free_i64(temp64
);
966 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
967 uint32_t n
, uint32_t mode
)
969 TCGv temp
= tcg_const_i32(n
);
970 TCGv_i64 temp64
= tcg_temp_new_i64();
973 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
976 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
979 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
982 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
985 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
988 tcg_temp_free_i64(temp64
);
992 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
994 TCGv temp
= tcg_temp_new();
995 TCGv temp2
= tcg_temp_new();
997 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
998 tcg_gen_shli_tl(temp
, r1
, 16);
999 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1001 tcg_temp_free(temp
);
1002 tcg_temp_free(temp2
);
1006 gen_maddsur32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1008 TCGv temp
= tcg_const_i32(n
);
1009 TCGv temp2
= tcg_temp_new();
1010 TCGv_i64 temp64
= tcg_temp_new_i64();
1013 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1016 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1019 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1022 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1025 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1026 tcg_gen_shli_tl(temp
, r1
, 16);
1027 gen_helper_addsur_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
1029 tcg_temp_free(temp
);
1030 tcg_temp_free(temp2
);
1031 tcg_temp_free_i64(temp64
);
1035 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1037 TCGv temp
= tcg_const_i32(n
);
1038 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1039 tcg_temp_free(temp
);
1043 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1045 TCGv temp
= tcg_const_i32(n
);
1046 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1047 tcg_temp_free(temp
);
1051 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1052 uint32_t up_shift
, CPUTriCoreState
*env
)
1054 TCGv temp
= tcg_temp_new();
1055 TCGv temp2
= tcg_temp_new();
1056 TCGv temp3
= tcg_temp_new();
1057 TCGv_i64 t1
= tcg_temp_new_i64();
1058 TCGv_i64 t2
= tcg_temp_new_i64();
1059 TCGv_i64 t3
= tcg_temp_new_i64();
1061 tcg_gen_ext_i32_i64(t2
, arg2
);
1062 tcg_gen_ext_i32_i64(t3
, arg3
);
1064 tcg_gen_mul_i64(t2
, t2
, t3
);
1065 tcg_gen_shli_i64(t2
, t2
, n
);
1067 tcg_gen_ext_i32_i64(t1
, arg1
);
1068 tcg_gen_sari_i64(t2
, t2
, up_shift
);
1070 tcg_gen_add_i64(t3
, t1
, t2
);
1071 tcg_gen_trunc_i64_i32(temp3
, t3
);
1073 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1074 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1075 tcg_gen_or_i64(t1
, t1
, t2
);
1076 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
1077 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1078 /* We produce an overflow on the host if the mul before was
1079 (0x80000000 * 0x80000000) << 1). If this is the
1080 case, we negate the ovf. */
1082 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1083 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1084 tcg_gen_and_tl(temp
, temp
, temp2
);
1085 tcg_gen_shli_tl(temp
, temp
, 31);
1086 /* negate v bit, if special condition */
1087 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1090 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1091 /* Calc AV/SAV bits */
1092 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1093 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1095 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1096 /* write back result */
1097 tcg_gen_mov_tl(ret
, temp3
);
1099 tcg_temp_free(temp
);
1100 tcg_temp_free(temp2
);
1101 tcg_temp_free(temp3
);
1102 tcg_temp_free_i64(t1
);
1103 tcg_temp_free_i64(t2
);
1104 tcg_temp_free_i64(t3
);
1108 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1110 TCGv temp
= tcg_temp_new();
1111 TCGv temp2
= tcg_temp_new();
1113 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1114 } else { /* n is exspected to be 1 */
1115 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1116 tcg_gen_shli_tl(temp
, temp
, 1);
1117 /* catch special case r1 = r2 = 0x8000 */
1118 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1119 tcg_gen_sub_tl(temp
, temp
, temp2
);
1121 gen_add_d(ret
, arg1
, temp
);
1123 tcg_temp_free(temp
);
1124 tcg_temp_free(temp2
);
1128 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1130 TCGv temp
= tcg_temp_new();
1131 TCGv temp2
= tcg_temp_new();
1133 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1134 } else { /* n is exspected to be 1 */
1135 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1136 tcg_gen_shli_tl(temp
, temp
, 1);
1137 /* catch special case r1 = r2 = 0x8000 */
1138 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1139 tcg_gen_sub_tl(temp
, temp
, temp2
);
1141 gen_adds(ret
, arg1
, temp
);
1143 tcg_temp_free(temp
);
1144 tcg_temp_free(temp2
);
1148 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1149 TCGv arg3
, uint32_t n
)
1151 TCGv temp
= tcg_temp_new();
1152 TCGv temp2
= tcg_temp_new();
1153 TCGv_i64 t1
= tcg_temp_new_i64();
1154 TCGv_i64 t2
= tcg_temp_new_i64();
1155 TCGv_i64 t3
= tcg_temp_new_i64();
1158 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1159 } else { /* n is exspected to be 1 */
1160 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1161 tcg_gen_shli_tl(temp
, temp
, 1);
1162 /* catch special case r1 = r2 = 0x8000 */
1163 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1164 tcg_gen_sub_tl(temp
, temp
, temp2
);
1166 tcg_gen_ext_i32_i64(t2
, temp
);
1167 tcg_gen_shli_i64(t2
, t2
, 16);
1168 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1169 gen_add64_d(t3
, t1
, t2
);
1170 /* write back result */
1171 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1173 tcg_temp_free_i64(t1
);
1174 tcg_temp_free_i64(t2
);
1175 tcg_temp_free_i64(t3
);
1176 tcg_temp_free(temp
);
1177 tcg_temp_free(temp2
);
1181 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1182 TCGv arg3
, uint32_t n
)
1184 TCGv temp
= tcg_temp_new();
1185 TCGv temp2
= tcg_temp_new();
1186 TCGv_i64 t1
= tcg_temp_new_i64();
1187 TCGv_i64 t2
= tcg_temp_new_i64();
1190 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1191 } else { /* n is exspected to be 1 */
1192 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1193 tcg_gen_shli_tl(temp
, temp
, 1);
1194 /* catch special case r1 = r2 = 0x8000 */
1195 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1196 tcg_gen_sub_tl(temp
, temp
, temp2
);
1198 tcg_gen_ext_i32_i64(t2
, temp
);
1199 tcg_gen_shli_i64(t2
, t2
, 16);
1200 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1202 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
1203 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1205 tcg_temp_free(temp
);
1206 tcg_temp_free(temp2
);
1207 tcg_temp_free_i64(t1
);
1208 tcg_temp_free_i64(t2
);
1212 gen_madd64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1213 TCGv arg3
, uint32_t n
, CPUTriCoreState
*env
)
1215 TCGv_i64 t1
= tcg_temp_new_i64();
1216 TCGv_i64 t2
= tcg_temp_new_i64();
1217 TCGv_i64 t3
= tcg_temp_new_i64();
1218 TCGv_i64 t4
= tcg_temp_new_i64();
1221 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1222 tcg_gen_ext_i32_i64(t2
, arg2
);
1223 tcg_gen_ext_i32_i64(t3
, arg3
);
1225 tcg_gen_mul_i64(t2
, t2
, t3
);
1227 tcg_gen_shli_i64(t2
, t2
, 1);
1229 tcg_gen_add_i64(t4
, t1
, t2
);
1231 tcg_gen_xor_i64(t3
, t4
, t1
);
1232 tcg_gen_xor_i64(t2
, t1
, t2
);
1233 tcg_gen_andc_i64(t3
, t3
, t2
);
1234 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t3
, 32);
1235 /* We produce an overflow on the host if the mul before was
1236 (0x80000000 * 0x80000000) << 1). If this is the
1237 case, we negate the ovf. */
1239 temp
= tcg_temp_new();
1240 temp2
= tcg_temp_new();
1241 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1242 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1243 tcg_gen_and_tl(temp
, temp
, temp2
);
1244 tcg_gen_shli_tl(temp
, temp
, 31);
1245 /* negate v bit, if special condition */
1246 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1248 tcg_temp_free(temp
);
1249 tcg_temp_free(temp2
);
1251 /* write back result */
1252 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1254 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1255 /* Calc AV/SAV bits */
1256 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1257 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1259 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1261 tcg_temp_free_i64(t1
);
1262 tcg_temp_free_i64(t2
);
1263 tcg_temp_free_i64(t3
);
1264 tcg_temp_free_i64(t4
);
1268 gen_madds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1271 TCGv_i64 t1
= tcg_temp_new_i64();
1272 TCGv_i64 t2
= tcg_temp_new_i64();
1273 TCGv_i64 t3
= tcg_temp_new_i64();
1275 tcg_gen_ext_i32_i64(t1
, arg1
);
1276 tcg_gen_ext_i32_i64(t2
, arg2
);
1277 tcg_gen_ext_i32_i64(t3
, arg3
);
1279 tcg_gen_mul_i64(t2
, t2
, t3
);
1280 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1282 gen_helper_madd32_q_add_ssov(ret
, cpu_env
, t1
, t2
);
1284 tcg_temp_free_i64(t1
);
1285 tcg_temp_free_i64(t2
);
1286 tcg_temp_free_i64(t3
);
1290 gen_madds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1291 TCGv arg3
, uint32_t n
)
1293 TCGv_i64 r1
= tcg_temp_new_i64();
1294 TCGv temp
= tcg_const_i32(n
);
1296 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1297 gen_helper_madd64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
1298 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1300 tcg_temp_free_i64(r1
);
1301 tcg_temp_free(temp
);
1303 /* ret = r2 - (r1 * r3); */
1304 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
1306 TCGv_i64 t1
= tcg_temp_new_i64();
1307 TCGv_i64 t2
= tcg_temp_new_i64();
1308 TCGv_i64 t3
= tcg_temp_new_i64();
1310 tcg_gen_ext_i32_i64(t1
, r1
);
1311 tcg_gen_ext_i32_i64(t2
, r2
);
1312 tcg_gen_ext_i32_i64(t3
, r3
);
1314 tcg_gen_mul_i64(t1
, t1
, t3
);
1315 tcg_gen_sub_i64(t1
, t2
, t1
);
1317 tcg_gen_trunc_i64_i32(ret
, t1
);
1320 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
1321 /* result < -0x80000000 */
1322 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
1323 tcg_gen_or_i64(t2
, t2
, t3
);
1324 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
1325 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1328 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1329 /* Calc AV/SAV bits */
1330 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1331 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1333 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1335 tcg_temp_free_i64(t1
);
1336 tcg_temp_free_i64(t2
);
1337 tcg_temp_free_i64(t3
);
1340 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1342 TCGv temp
= tcg_const_i32(con
);
1343 gen_msub32_d(ret
, r1
, r2
, temp
);
1344 tcg_temp_free(temp
);
1348 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1351 TCGv t1
= tcg_temp_new();
1352 TCGv t2
= tcg_temp_new();
1353 TCGv t3
= tcg_temp_new();
1354 TCGv t4
= tcg_temp_new();
1356 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
1357 /* only the sub can overflow */
1358 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
1360 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
1361 tcg_gen_xor_tl(t1
, r2_high
, t2
);
1362 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
1364 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1365 /* Calc AV/SAV bits */
1366 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
1367 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
1369 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1370 /* write back the result */
1371 tcg_gen_mov_tl(ret_low
, t3
);
1372 tcg_gen_mov_tl(ret_high
, t4
);
1381 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1384 TCGv temp
= tcg_const_i32(con
);
1385 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1386 tcg_temp_free(temp
);
1390 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1393 TCGv_i64 t1
= tcg_temp_new_i64();
1394 TCGv_i64 t2
= tcg_temp_new_i64();
1395 TCGv_i64 t3
= tcg_temp_new_i64();
1397 tcg_gen_extu_i32_i64(t1
, r1
);
1398 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
1399 tcg_gen_extu_i32_i64(t3
, r3
);
1401 tcg_gen_mul_i64(t1
, t1
, t3
);
1402 tcg_gen_sub_i64(t3
, t2
, t1
);
1403 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
1404 /* calc V bit, only the sub can overflow, if t1 > t2 */
1405 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
1406 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
1407 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1409 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1410 /* Calc AV/SAV bits */
1411 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1412 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1414 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1416 tcg_temp_free_i64(t1
);
1417 tcg_temp_free_i64(t2
);
1418 tcg_temp_free_i64(t3
);
1422 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1425 TCGv temp
= tcg_const_i32(con
);
1426 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1427 tcg_temp_free(temp
);
1430 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1432 TCGv temp
= tcg_const_i32(r2
);
1433 gen_add_d(ret
, r1
, temp
);
1434 tcg_temp_free(temp
);
1436 /* calculate the carry bit too */
1437 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1439 TCGv t0
= tcg_temp_new_i32();
1440 TCGv result
= tcg_temp_new_i32();
1442 tcg_gen_movi_tl(t0
, 0);
1443 /* Addition and set C/V/SV bits */
1444 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
1446 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1447 tcg_gen_xor_tl(t0
, r1
, r2
);
1448 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1450 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1451 /* Calc AV/SAV bits */
1452 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1453 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1455 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1456 /* write back result */
1457 tcg_gen_mov_tl(ret
, result
);
1459 tcg_temp_free(result
);
1463 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1465 TCGv temp
= tcg_const_i32(con
);
1466 gen_add_CC(ret
, r1
, temp
);
1467 tcg_temp_free(temp
);
1470 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1472 TCGv carry
= tcg_temp_new_i32();
1473 TCGv t0
= tcg_temp_new_i32();
1474 TCGv result
= tcg_temp_new_i32();
1476 tcg_gen_movi_tl(t0
, 0);
1477 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1478 /* Addition, carry and set C/V/SV bits */
1479 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1480 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1482 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1483 tcg_gen_xor_tl(t0
, r1
, r2
);
1484 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1486 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1487 /* Calc AV/SAV bits */
1488 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1489 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1491 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1492 /* write back result */
1493 tcg_gen_mov_tl(ret
, result
);
1495 tcg_temp_free(result
);
1497 tcg_temp_free(carry
);
1500 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1502 TCGv temp
= tcg_const_i32(con
);
1503 gen_addc_CC(ret
, r1
, temp
);
1504 tcg_temp_free(temp
);
1507 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1510 TCGv temp
= tcg_temp_new();
1511 TCGv temp2
= tcg_temp_new();
1512 TCGv result
= tcg_temp_new();
1513 TCGv mask
= tcg_temp_new();
1514 TCGv t0
= tcg_const_i32(0);
1516 /* create mask for sticky bits */
1517 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1518 tcg_gen_shli_tl(mask
, mask
, 31);
1520 tcg_gen_add_tl(result
, r1
, r2
);
1522 tcg_gen_xor_tl(temp
, result
, r1
);
1523 tcg_gen_xor_tl(temp2
, r1
, r2
);
1524 tcg_gen_andc_tl(temp
, temp
, temp2
);
1525 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1527 tcg_gen_and_tl(temp
, temp
, mask
);
1528 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1530 tcg_gen_add_tl(temp
, result
, result
);
1531 tcg_gen_xor_tl(temp
, temp
, result
);
1532 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1534 tcg_gen_and_tl(temp
, temp
, mask
);
1535 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1536 /* write back result */
1537 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1540 tcg_temp_free(temp
);
1541 tcg_temp_free(temp2
);
1542 tcg_temp_free(result
);
1543 tcg_temp_free(mask
);
1546 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1549 TCGv temp
= tcg_const_i32(r2
);
1550 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1551 tcg_temp_free(temp
);
1554 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1556 TCGv temp
= tcg_temp_new_i32();
1557 TCGv result
= tcg_temp_new_i32();
1559 tcg_gen_sub_tl(result
, r1
, r2
);
1561 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1562 tcg_gen_xor_tl(temp
, r1
, r2
);
1563 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1565 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1567 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1568 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1570 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1571 /* write back result */
1572 tcg_gen_mov_tl(ret
, result
);
1574 tcg_temp_free(temp
);
1575 tcg_temp_free(result
);
1579 gen_sub64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
1581 TCGv temp
= tcg_temp_new();
1582 TCGv_i64 t0
= tcg_temp_new_i64();
1583 TCGv_i64 t1
= tcg_temp_new_i64();
1584 TCGv_i64 result
= tcg_temp_new_i64();
1586 tcg_gen_sub_i64(result
, r1
, r2
);
1588 tcg_gen_xor_i64(t1
, result
, r1
);
1589 tcg_gen_xor_i64(t0
, r1
, r2
);
1590 tcg_gen_and_i64(t1
, t1
, t0
);
1591 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t1
, 32);
1593 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1594 /* calc AV/SAV bits */
1595 tcg_gen_trunc_shr_i64_i32(temp
, result
, 32);
1596 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
1597 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
1599 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1600 /* write back result */
1601 tcg_gen_mov_i64(ret
, result
);
1603 tcg_temp_free(temp
);
1604 tcg_temp_free_i64(result
);
1605 tcg_temp_free_i64(t0
);
1606 tcg_temp_free_i64(t1
);
1609 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1611 TCGv result
= tcg_temp_new();
1612 TCGv temp
= tcg_temp_new();
1614 tcg_gen_sub_tl(result
, r1
, r2
);
1616 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1618 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1619 tcg_gen_xor_tl(temp
, r1
, r2
);
1620 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1622 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1624 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1625 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1627 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1628 /* write back result */
1629 tcg_gen_mov_tl(ret
, result
);
1631 tcg_temp_free(result
);
1632 tcg_temp_free(temp
);
1635 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1637 TCGv temp
= tcg_temp_new();
1638 tcg_gen_not_tl(temp
, r2
);
1639 gen_addc_CC(ret
, r1
, temp
);
1640 tcg_temp_free(temp
);
1643 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1646 TCGv temp
= tcg_temp_new();
1647 TCGv temp2
= tcg_temp_new();
1648 TCGv result
= tcg_temp_new();
1649 TCGv mask
= tcg_temp_new();
1650 TCGv t0
= tcg_const_i32(0);
1652 /* create mask for sticky bits */
1653 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1654 tcg_gen_shli_tl(mask
, mask
, 31);
1656 tcg_gen_sub_tl(result
, r1
, r2
);
1658 tcg_gen_xor_tl(temp
, result
, r1
);
1659 tcg_gen_xor_tl(temp2
, r1
, r2
);
1660 tcg_gen_and_tl(temp
, temp
, temp2
);
1661 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1663 tcg_gen_and_tl(temp
, temp
, mask
);
1664 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1666 tcg_gen_add_tl(temp
, result
, result
);
1667 tcg_gen_xor_tl(temp
, temp
, result
);
1668 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1670 tcg_gen_and_tl(temp
, temp
, mask
);
1671 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1672 /* write back result */
1673 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1676 tcg_temp_free(temp
);
1677 tcg_temp_free(temp2
);
1678 tcg_temp_free(result
);
1679 tcg_temp_free(mask
);
1683 gen_msub_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1684 TCGv r3
, uint32_t n
, uint32_t mode
)
1686 TCGv temp
= tcg_const_i32(n
);
1687 TCGv temp2
= tcg_temp_new();
1688 TCGv_i64 temp64
= tcg_temp_new_i64();
1691 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1694 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1697 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1700 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1703 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1704 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1705 tcg_gen_sub_tl
, tcg_gen_sub_tl
);
1706 tcg_temp_free(temp
);
1707 tcg_temp_free(temp2
);
1708 tcg_temp_free_i64(temp64
);
1712 gen_msubs_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1713 TCGv r3
, uint32_t n
, uint32_t mode
)
1715 TCGv temp
= tcg_const_i32(n
);
1716 TCGv temp2
= tcg_temp_new();
1717 TCGv temp3
= tcg_temp_new();
1718 TCGv_i64 temp64
= tcg_temp_new_i64();
1722 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1725 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1728 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1731 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1734 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1735 gen_subs(ret_low
, r1_low
, temp
);
1736 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
1737 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
1738 gen_subs(ret_high
, r1_high
, temp2
);
1739 /* combine v bits */
1740 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1741 /* combine av bits */
1742 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
1744 tcg_temp_free(temp
);
1745 tcg_temp_free(temp2
);
1746 tcg_temp_free(temp3
);
1747 tcg_temp_free_i64(temp64
);
1751 gen_msubm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1752 TCGv r3
, uint32_t n
, uint32_t mode
)
1754 TCGv temp
= tcg_const_i32(n
);
1755 TCGv_i64 temp64
= tcg_temp_new_i64();
1756 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1757 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1760 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1763 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1766 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1769 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1772 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1773 gen_sub64_d(temp64_3
, temp64_2
, temp64
);
1774 /* write back result */
1775 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
1777 tcg_temp_free(temp
);
1778 tcg_temp_free_i64(temp64
);
1779 tcg_temp_free_i64(temp64_2
);
1780 tcg_temp_free_i64(temp64_3
);
1784 gen_msubms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1785 TCGv r3
, uint32_t n
, uint32_t mode
)
1787 TCGv temp
= tcg_const_i32(n
);
1788 TCGv_i64 temp64
= tcg_temp_new_i64();
1789 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1792 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1795 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1798 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1801 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1804 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1805 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
1806 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1808 tcg_temp_free(temp
);
1809 tcg_temp_free_i64(temp64
);
1810 tcg_temp_free_i64(temp64_2
);
1814 gen_msubr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
1817 TCGv temp
= tcg_const_i32(n
);
1818 TCGv_i64 temp64
= tcg_temp_new_i64();
1821 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1824 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1827 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1830 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1833 gen_helper_subr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1835 tcg_temp_free(temp
);
1836 tcg_temp_free_i64(temp64
);
1840 gen_msubr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1842 TCGv temp
= tcg_temp_new();
1843 TCGv temp2
= tcg_temp_new();
1845 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1846 tcg_gen_shli_tl(temp
, r1
, 16);
1847 gen_msubr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1849 tcg_temp_free(temp
);
1850 tcg_temp_free(temp2
);
1854 gen_msubr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1855 uint32_t n
, uint32_t mode
)
1857 TCGv temp
= tcg_const_i32(n
);
1858 TCGv_i64 temp64
= tcg_temp_new_i64();
1861 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1864 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1867 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1870 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1873 gen_helper_subr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1875 tcg_temp_free(temp
);
1876 tcg_temp_free_i64(temp64
);
1880 gen_msubr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1882 TCGv temp
= tcg_temp_new();
1883 TCGv temp2
= tcg_temp_new();
1885 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1886 tcg_gen_shli_tl(temp
, r1
, 16);
1887 gen_msubr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1889 tcg_temp_free(temp
);
1890 tcg_temp_free(temp2
);
1893 static inline void gen_abs(TCGv ret
, TCGv r1
)
1895 TCGv temp
= tcg_temp_new();
1896 TCGv t0
= tcg_const_i32(0);
1898 tcg_gen_neg_tl(temp
, r1
);
1899 tcg_gen_movcond_tl(TCG_COND_GE
, ret
, r1
, t0
, r1
, temp
);
1900 /* overflow can only happen, if r1 = 0x80000000 */
1901 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
1902 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1904 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1906 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1907 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1909 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1911 tcg_temp_free(temp
);
1915 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
1917 TCGv temp
= tcg_temp_new_i32();
1918 TCGv result
= tcg_temp_new_i32();
1920 tcg_gen_sub_tl(result
, r1
, r2
);
1921 tcg_gen_sub_tl(temp
, r2
, r1
);
1922 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
1925 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1926 tcg_gen_xor_tl(temp
, result
, r2
);
1927 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
1928 tcg_gen_xor_tl(temp
, r1
, r2
);
1929 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1931 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1933 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1934 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1936 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1937 /* write back result */
1938 tcg_gen_mov_tl(ret
, result
);
1940 tcg_temp_free(temp
);
1941 tcg_temp_free(result
);
1944 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
1946 TCGv temp
= tcg_const_i32(con
);
1947 gen_absdif(ret
, r1
, temp
);
1948 tcg_temp_free(temp
);
1951 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
1953 TCGv temp
= tcg_const_i32(con
);
1954 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
1955 tcg_temp_free(temp
);
1958 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
1960 TCGv high
= tcg_temp_new();
1961 TCGv low
= tcg_temp_new();
1963 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
1964 tcg_gen_mov_tl(ret
, low
);
1966 tcg_gen_sari_tl(low
, low
, 31);
1967 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
1968 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1970 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1972 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1973 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1975 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1977 tcg_temp_free(high
);
1981 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
1983 TCGv temp
= tcg_const_i32(con
);
1984 gen_mul_i32s(ret
, r1
, temp
);
1985 tcg_temp_free(temp
);
1988 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
1990 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
1992 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1994 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1996 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1997 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1999 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2002 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2005 TCGv temp
= tcg_const_i32(con
);
2006 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
2007 tcg_temp_free(temp
);
2010 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2012 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
2014 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2016 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2018 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2019 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2021 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2024 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2027 TCGv temp
= tcg_const_i32(con
);
2028 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
2029 tcg_temp_free(temp
);
2032 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
2034 TCGv temp
= tcg_const_i32(con
);
2035 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
2036 tcg_temp_free(temp
);
2039 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
2041 TCGv temp
= tcg_const_i32(con
);
2042 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
2043 tcg_temp_free(temp
);
2045 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2046 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2048 TCGv temp
= tcg_const_i32(con
);
2049 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2050 tcg_temp_free(temp
);
2053 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2055 TCGv temp
= tcg_const_i32(con
);
2056 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2057 tcg_temp_free(temp
);
2061 gen_mul_q(TCGv rl
, TCGv rh
, TCGv arg1
, TCGv arg2
, uint32_t n
, uint32_t up_shift
)
2063 TCGv temp
= tcg_temp_new();
2064 TCGv_i64 temp_64
= tcg_temp_new_i64();
2065 TCGv_i64 temp2_64
= tcg_temp_new_i64();
2068 if (up_shift
== 32) {
2069 tcg_gen_muls2_tl(rh
, rl
, arg1
, arg2
);
2070 } else if (up_shift
== 16) {
2071 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2072 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2074 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2075 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
);
2076 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2078 tcg_gen_muls2_tl(rl
, rh
, arg1
, arg2
);
2081 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2082 } else { /* n is exspected to be 1 */
2083 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2084 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2086 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2088 if (up_shift
== 0) {
2089 tcg_gen_shli_i64(temp_64
, temp_64
, 1);
2091 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
- 1);
2093 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2094 /* overflow only occours if r1 = r2 = 0x8000 */
2095 if (up_shift
== 0) {/* result is 64 bit */
2096 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rh
,
2098 } else { /* result is 32 bit */
2099 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rl
,
2102 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2103 /* calc sv overflow bit */
2104 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2106 /* calc av overflow bit */
2107 if (up_shift
== 0) {
2108 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2109 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2111 tcg_gen_add_tl(cpu_PSW_AV
, rl
, rl
);
2112 tcg_gen_xor_tl(cpu_PSW_AV
, rl
, cpu_PSW_AV
);
2114 /* calc sav overflow bit */
2115 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2116 tcg_temp_free(temp
);
2117 tcg_temp_free_i64(temp_64
);
2118 tcg_temp_free_i64(temp2_64
);
2122 gen_mul_q_16(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2124 TCGv temp
= tcg_temp_new();
2126 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2127 } else { /* n is exspected to be 1 */
2128 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2129 tcg_gen_shli_tl(ret
, ret
, 1);
2130 /* catch special case r1 = r2 = 0x8000 */
2131 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80000000);
2132 tcg_gen_sub_tl(ret
, ret
, temp
);
2135 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2136 /* calc av overflow bit */
2137 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2138 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2139 /* calc sav overflow bit */
2140 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2142 tcg_temp_free(temp
);
2145 static void gen_mulr_q(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2147 TCGv temp
= tcg_temp_new();
2149 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2150 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2152 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2153 tcg_gen_shli_tl(ret
, ret
, 1);
2154 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2155 /* catch special case r1 = r2 = 0x8000 */
2156 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80008000);
2157 tcg_gen_muli_tl(temp
, temp
, 0x8001);
2158 tcg_gen_sub_tl(ret
, ret
, temp
);
2161 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2162 /* calc av overflow bit */
2163 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2164 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2165 /* calc sav overflow bit */
2166 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2167 /* cut halfword off */
2168 tcg_gen_andi_tl(ret
, ret
, 0xffff0000);
2170 tcg_temp_free(temp
);
2174 gen_madds_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2177 TCGv_i64 temp64
= tcg_temp_new_i64();
2178 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2179 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2180 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2181 tcg_temp_free_i64(temp64
);
2185 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2188 TCGv temp
= tcg_const_i32(con
);
2189 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2190 tcg_temp_free(temp
);
2194 gen_maddsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2197 TCGv_i64 temp64
= tcg_temp_new_i64();
2198 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2199 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2200 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2201 tcg_temp_free_i64(temp64
);
2205 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2208 TCGv temp
= tcg_const_i32(con
);
2209 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2210 tcg_temp_free(temp
);
2213 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2215 TCGv temp
= tcg_const_i32(con
);
2216 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2217 tcg_temp_free(temp
);
2220 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2222 TCGv temp
= tcg_const_i32(con
);
2223 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2224 tcg_temp_free(temp
);
2228 gen_msubs_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2231 TCGv_i64 temp64
= tcg_temp_new_i64();
2232 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2233 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2234 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2235 tcg_temp_free_i64(temp64
);
2239 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2242 TCGv temp
= tcg_const_i32(con
);
2243 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2244 tcg_temp_free(temp
);
2248 gen_msubsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2251 TCGv_i64 temp64
= tcg_temp_new_i64();
2252 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2253 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2254 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2255 tcg_temp_free_i64(temp64
);
2259 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2262 TCGv temp
= tcg_const_i32(con
);
2263 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2264 tcg_temp_free(temp
);
2267 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
2269 TCGv sat_neg
= tcg_const_i32(low
);
2270 TCGv temp
= tcg_const_i32(up
);
2272 /* sat_neg = (arg < low ) ? low : arg; */
2273 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
2275 /* ret = (sat_neg > up ) ? up : sat_neg; */
2276 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
2278 tcg_temp_free(sat_neg
);
2279 tcg_temp_free(temp
);
2282 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
2284 TCGv temp
= tcg_const_i32(up
);
2285 /* sat_neg = (arg > up ) ? up : arg; */
2286 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
2287 tcg_temp_free(temp
);
2290 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2292 if (shift_count
== -32) {
2293 tcg_gen_movi_tl(ret
, 0);
2294 } else if (shift_count
>= 0) {
2295 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2297 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
2301 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
2303 TCGv temp_low
, temp_high
;
2305 if (shiftcount
== -16) {
2306 tcg_gen_movi_tl(ret
, 0);
2308 temp_high
= tcg_temp_new();
2309 temp_low
= tcg_temp_new();
2311 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
2312 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
2313 gen_shi(temp_low
, temp_low
, shiftcount
);
2314 gen_shi(ret
, temp_high
, shiftcount
);
2315 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
2317 tcg_temp_free(temp_low
);
2318 tcg_temp_free(temp_high
);
2322 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
2324 uint32_t msk
, msk_start
;
2325 TCGv temp
= tcg_temp_new();
2326 TCGv temp2
= tcg_temp_new();
2327 TCGv t_0
= tcg_const_i32(0);
2329 if (shift_count
== 0) {
2330 /* Clear PSW.C and PSW.V */
2331 tcg_gen_movi_tl(cpu_PSW_C
, 0);
2332 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
2333 tcg_gen_mov_tl(ret
, r1
);
2334 } else if (shift_count
== -32) {
2336 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
2337 /* fill ret completly with sign bit */
2338 tcg_gen_sari_tl(ret
, r1
, 31);
2340 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2341 } else if (shift_count
> 0) {
2342 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
2343 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
2346 msk_start
= 32 - shift_count
;
2347 msk
= ((1 << shift_count
) - 1) << msk_start
;
2348 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2349 /* calc v/sv bits */
2350 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
2351 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
2352 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
2353 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2355 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
2357 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2359 tcg_temp_free(t_max
);
2360 tcg_temp_free(t_min
);
2363 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2365 msk
= (1 << -shift_count
) - 1;
2366 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2368 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2370 /* calc av overflow bit */
2371 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2372 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2373 /* calc sav overflow bit */
2374 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2376 tcg_temp_free(temp
);
2377 tcg_temp_free(temp2
);
2381 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
2383 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
2386 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
2388 TCGv temp
= tcg_const_i32(con
);
2389 gen_shas(ret
, r1
, temp
);
2390 tcg_temp_free(temp
);
2393 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2397 if (shift_count
== 0) {
2398 tcg_gen_mov_tl(ret
, r1
);
2399 } else if (shift_count
> 0) {
2400 low
= tcg_temp_new();
2401 high
= tcg_temp_new();
2403 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
2404 tcg_gen_shli_tl(low
, r1
, shift_count
);
2405 tcg_gen_shli_tl(ret
, high
, shift_count
);
2406 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2409 tcg_temp_free(high
);
2411 low
= tcg_temp_new();
2412 high
= tcg_temp_new();
2414 tcg_gen_ext16s_tl(low
, r1
);
2415 tcg_gen_sari_tl(low
, low
, -shift_count
);
2416 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2417 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2420 tcg_temp_free(high
);
2425 /* ret = {ret[30:0], (r1 cond r2)}; */
2426 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2428 TCGv temp
= tcg_temp_new();
2429 TCGv temp2
= tcg_temp_new();
2431 tcg_gen_shli_tl(temp
, ret
, 1);
2432 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
2433 tcg_gen_or_tl(ret
, temp
, temp2
);
2435 tcg_temp_free(temp
);
2436 tcg_temp_free(temp2
);
2439 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
2441 TCGv temp
= tcg_const_i32(con
);
2442 gen_sh_cond(cond
, ret
, r1
, temp
);
2443 tcg_temp_free(temp
);
2446 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
2448 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
2451 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
2453 TCGv temp
= tcg_const_i32(con
);
2454 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
2455 tcg_temp_free(temp
);
2458 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
2460 TCGv temp
= tcg_const_i32(con
);
2461 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
2462 tcg_temp_free(temp
);
2465 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
2467 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
2470 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
2472 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
2475 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
2477 void(*op1
)(TCGv
, TCGv
, TCGv
),
2478 void(*op2
)(TCGv
, TCGv
, TCGv
))
2482 temp1
= tcg_temp_new();
2483 temp2
= tcg_temp_new();
2485 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2486 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2488 (*op1
)(temp1
, temp1
, temp2
);
2489 (*op2
)(temp1
, ret
, temp1
);
2491 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
2493 tcg_temp_free(temp1
);
2494 tcg_temp_free(temp2
);
2497 /* ret = r1[pos1] op1 r2[pos2]; */
2498 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
2500 void(*op1
)(TCGv
, TCGv
, TCGv
))
2504 temp1
= tcg_temp_new();
2505 temp2
= tcg_temp_new();
2507 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2508 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2510 (*op1
)(ret
, temp1
, temp2
);
2512 tcg_gen_andi_tl(ret
, ret
, 0x1);
2514 tcg_temp_free(temp1
);
2515 tcg_temp_free(temp2
);
2518 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
2519 void(*op
)(TCGv
, TCGv
, TCGv
))
2521 TCGv temp
= tcg_temp_new();
2522 TCGv temp2
= tcg_temp_new();
2523 /* temp = (arg1 cond arg2 )*/
2524 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
2526 tcg_gen_andi_tl(temp2
, ret
, 0x1);
2527 /* temp = temp insn temp2 */
2528 (*op
)(temp
, temp
, temp2
);
2529 /* ret = {ret[31:1], temp} */
2530 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
2532 tcg_temp_free(temp
);
2533 tcg_temp_free(temp2
);
2537 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
2538 void(*op
)(TCGv
, TCGv
, TCGv
))
2540 TCGv temp
= tcg_const_i32(con
);
2541 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
2542 tcg_temp_free(temp
);
2545 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
2546 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2548 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
2549 tcg_gen_neg_tl(ret
, ret
);
2552 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
2554 TCGv b0
= tcg_temp_new();
2555 TCGv b1
= tcg_temp_new();
2556 TCGv b2
= tcg_temp_new();
2557 TCGv b3
= tcg_temp_new();
2560 tcg_gen_andi_tl(b0
, r1
, 0xff);
2561 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
2564 tcg_gen_andi_tl(b1
, r1
, 0xff00);
2565 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
2568 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
2569 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
2572 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
2573 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
2576 tcg_gen_or_tl(ret
, b0
, b1
);
2577 tcg_gen_or_tl(ret
, ret
, b2
);
2578 tcg_gen_or_tl(ret
, ret
, b3
);
2586 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
2588 TCGv h0
= tcg_temp_new();
2589 TCGv h1
= tcg_temp_new();
2592 tcg_gen_andi_tl(h0
, r1
, 0xffff);
2593 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
2596 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
2597 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
2600 tcg_gen_or_tl(ret
, h0
, h1
);
2605 /* mask = ((1 << width) -1) << pos;
2606 ret = (r1 & ~mask) | (r2 << pos) & mask); */
2607 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
2609 TCGv mask
= tcg_temp_new();
2610 TCGv temp
= tcg_temp_new();
2611 TCGv temp2
= tcg_temp_new();
2613 tcg_gen_movi_tl(mask
, 1);
2614 tcg_gen_shl_tl(mask
, mask
, width
);
2615 tcg_gen_subi_tl(mask
, mask
, 1);
2616 tcg_gen_shl_tl(mask
, mask
, pos
);
2618 tcg_gen_shl_tl(temp
, r2
, pos
);
2619 tcg_gen_and_tl(temp
, temp
, mask
);
2620 tcg_gen_andc_tl(temp2
, r1
, mask
);
2621 tcg_gen_or_tl(ret
, temp
, temp2
);
2623 tcg_temp_free(mask
);
2624 tcg_temp_free(temp
);
2625 tcg_temp_free(temp2
);
2628 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
2630 TCGv_i64 temp
= tcg_temp_new_i64();
2632 gen_helper_bsplit(temp
, r1
);
2633 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2635 tcg_temp_free_i64(temp
);
2638 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
2640 TCGv_i64 temp
= tcg_temp_new_i64();
2642 gen_helper_unpack(temp
, r1
);
2643 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2645 tcg_temp_free_i64(temp
);
2649 gen_dvinit_b(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2651 TCGv_i64 ret
= tcg_temp_new_i64();
2653 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
2654 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
2656 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
2658 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2660 tcg_temp_free_i64(ret
);
2664 gen_dvinit_h(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2666 TCGv_i64 ret
= tcg_temp_new_i64();
2668 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
2669 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
2671 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
2673 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2675 tcg_temp_free_i64(ret
);
2678 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
2680 TCGv temp
= tcg_temp_new();
2682 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
2683 tcg_gen_xor_tl(temp
, temp
, arg_low
);
2684 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
2685 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
2686 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2688 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2689 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2690 tcg_temp_free(temp
);
2693 static void gen_calc_usb_mulr_h(TCGv arg
)
2695 TCGv temp
= tcg_temp_new();
2697 tcg_gen_add_tl(temp
, arg
, arg
);
2698 tcg_gen_xor_tl(temp
, temp
, arg
);
2699 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
2700 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2702 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2704 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2705 tcg_temp_free(temp
);
2708 /* helpers for generating program flow micro-ops */
2710 static inline void gen_save_pc(target_ulong pc
)
2712 tcg_gen_movi_tl(cpu_PC
, pc
);
2715 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
2717 TranslationBlock
*tb
;
2719 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
2720 likely(!ctx
->singlestep_enabled
)) {
2723 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
2726 if (ctx
->singlestep_enabled
) {
2727 /* raise exception debug */
2733 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2734 TCGv r2
, int16_t address
)
2736 TCGLabel
*jumpLabel
= gen_new_label();
2737 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
2739 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
2741 gen_set_label(jumpLabel
);
2742 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
2745 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2746 int r2
, int16_t address
)
2748 TCGv temp
= tcg_const_i32(r2
);
2749 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
2750 tcg_temp_free(temp
);
2753 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
2755 TCGLabel
*l1
= gen_new_label();
2757 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
2758 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
2759 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
2761 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
2764 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
2765 int r2
, int32_t constant
, int32_t offset
)
2771 /* SB-format jumps */
2774 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2776 case OPC1_32_B_CALL
:
2777 case OPC1_16_SB_CALL
:
2778 gen_helper_1arg(call
, ctx
->next_pc
);
2779 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2782 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
2784 case OPC1_16_SB_JNZ
:
2785 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
2787 /* SBC-format jumps */
2788 case OPC1_16_SBC_JEQ
:
2789 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
2791 case OPC1_16_SBC_JNE
:
2792 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
2794 /* SBRN-format jumps */
2795 case OPC1_16_SBRN_JZ_T
:
2796 temp
= tcg_temp_new();
2797 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2798 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2799 tcg_temp_free(temp
);
2801 case OPC1_16_SBRN_JNZ_T
:
2802 temp
= tcg_temp_new();
2803 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2804 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2805 tcg_temp_free(temp
);
2807 /* SBR-format jumps */
2808 case OPC1_16_SBR_JEQ
:
2809 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2812 case OPC1_16_SBR_JNE
:
2813 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2816 case OPC1_16_SBR_JNZ
:
2817 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
2819 case OPC1_16_SBR_JNZ_A
:
2820 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2822 case OPC1_16_SBR_JGEZ
:
2823 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
2825 case OPC1_16_SBR_JGTZ
:
2826 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
2828 case OPC1_16_SBR_JLEZ
:
2829 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
2831 case OPC1_16_SBR_JLTZ
:
2832 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
2834 case OPC1_16_SBR_JZ
:
2835 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
2837 case OPC1_16_SBR_JZ_A
:
2838 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2840 case OPC1_16_SBR_LOOP
:
2841 gen_loop(ctx
, r1
, offset
* 2 - 32);
2843 /* SR-format jumps */
2845 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
2848 case OPC2_16_SR_RET
:
2849 gen_helper_ret(cpu_env
);
2853 case OPC1_32_B_CALLA
:
2854 gen_helper_1arg(call
, ctx
->next_pc
);
2855 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
2858 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
2861 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
2864 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
2865 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2868 case OPCM_32_BRC_EQ_NEQ
:
2869 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
2870 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
2872 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
2875 case OPCM_32_BRC_GE
:
2876 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
2877 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
2879 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
2880 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
2884 case OPCM_32_BRC_JLT
:
2885 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
2886 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
2888 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
2889 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
2893 case OPCM_32_BRC_JNE
:
2894 temp
= tcg_temp_new();
2895 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
2896 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2897 /* subi is unconditional */
2898 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2899 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
2901 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2902 /* addi is unconditional */
2903 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2904 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
2906 tcg_temp_free(temp
);
2909 case OPCM_32_BRN_JTT
:
2910 n
= MASK_OP_BRN_N(ctx
->opcode
);
2912 temp
= tcg_temp_new();
2913 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
2915 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
2916 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2918 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2920 tcg_temp_free(temp
);
2923 case OPCM_32_BRR_EQ_NEQ
:
2924 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
2925 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2928 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2932 case OPCM_32_BRR_ADDR_EQ_NEQ
:
2933 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
2934 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
2937 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
2941 case OPCM_32_BRR_GE
:
2942 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
2943 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2946 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2950 case OPCM_32_BRR_JLT
:
2951 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
2952 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2955 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2959 case OPCM_32_BRR_LOOP
:
2960 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
2961 gen_loop(ctx
, r1
, offset
* 2);
2963 /* OPC2_32_BRR_LOOPU */
2964 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2967 case OPCM_32_BRR_JNE
:
2968 temp
= tcg_temp_new();
2969 temp2
= tcg_temp_new();
2970 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
2971 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2972 /* also save r2, in case of r1 == r2, so r2 is not decremented */
2973 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
2974 /* subi is unconditional */
2975 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2976 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
2978 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2979 /* also save r2, in case of r1 == r2, so r2 is not decremented */
2980 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
2981 /* addi is unconditional */
2982 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2983 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
2985 tcg_temp_free(temp
);
2986 tcg_temp_free(temp2
);
2988 case OPCM_32_BRR_JNZ
:
2989 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
2990 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2992 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2996 printf("Branch Error at %x\n", ctx
->pc
);
2998 ctx
->bstate
= BS_BRANCH
;
3003 * Functions for decoding instructions
3006 static void decode_src_opc(DisasContext
*ctx
, int op1
)
3012 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3013 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3016 case OPC1_16_SRC_ADD
:
3017 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3019 case OPC1_16_SRC_ADD_A15
:
3020 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3022 case OPC1_16_SRC_ADD_15A
:
3023 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3025 case OPC1_16_SRC_ADD_A
:
3026 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3028 case OPC1_16_SRC_CADD
:
3029 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3032 case OPC1_16_SRC_CADDN
:
3033 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3036 case OPC1_16_SRC_CMOV
:
3037 temp
= tcg_const_tl(0);
3038 temp2
= tcg_const_tl(const4
);
3039 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3040 temp2
, cpu_gpr_d
[r1
]);
3041 tcg_temp_free(temp
);
3042 tcg_temp_free(temp2
);
3044 case OPC1_16_SRC_CMOVN
:
3045 temp
= tcg_const_tl(0);
3046 temp2
= tcg_const_tl(const4
);
3047 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3048 temp2
, cpu_gpr_d
[r1
]);
3049 tcg_temp_free(temp
);
3050 tcg_temp_free(temp2
);
3052 case OPC1_16_SRC_EQ
:
3053 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3056 case OPC1_16_SRC_LT
:
3057 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3060 case OPC1_16_SRC_MOV
:
3061 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3063 case OPC1_16_SRC_MOV_A
:
3064 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3065 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3067 case OPC1_16_SRC_SH
:
3068 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3070 case OPC1_16_SRC_SHA
:
3071 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3076 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3081 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3082 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3085 case OPC1_16_SRR_ADD
:
3086 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3088 case OPC1_16_SRR_ADD_A15
:
3089 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3091 case OPC1_16_SRR_ADD_15A
:
3092 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3094 case OPC1_16_SRR_ADD_A
:
3095 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3097 case OPC1_16_SRR_ADDS
:
3098 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3100 case OPC1_16_SRR_AND
:
3101 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3103 case OPC1_16_SRR_CMOV
:
3104 temp
= tcg_const_tl(0);
3105 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3106 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3107 tcg_temp_free(temp
);
3109 case OPC1_16_SRR_CMOVN
:
3110 temp
= tcg_const_tl(0);
3111 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3112 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3113 tcg_temp_free(temp
);
3115 case OPC1_16_SRR_EQ
:
3116 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3119 case OPC1_16_SRR_LT
:
3120 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3123 case OPC1_16_SRR_MOV
:
3124 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3126 case OPC1_16_SRR_MOV_A
:
3127 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3129 case OPC1_16_SRR_MOV_AA
:
3130 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3132 case OPC1_16_SRR_MOV_D
:
3133 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3135 case OPC1_16_SRR_MUL
:
3136 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3138 case OPC1_16_SRR_OR
:
3139 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3141 case OPC1_16_SRR_SUB
:
3142 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3144 case OPC1_16_SRR_SUB_A15B
:
3145 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3147 case OPC1_16_SRR_SUB_15AB
:
3148 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3150 case OPC1_16_SRR_SUBS
:
3151 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3153 case OPC1_16_SRR_XOR
:
3154 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3159 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3163 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3164 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3167 case OPC1_16_SSR_ST_A
:
3168 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3170 case OPC1_16_SSR_ST_A_POSTINC
:
3171 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3172 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3174 case OPC1_16_SSR_ST_B
:
3175 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3177 case OPC1_16_SSR_ST_B_POSTINC
:
3178 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3179 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3181 case OPC1_16_SSR_ST_H
:
3182 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3184 case OPC1_16_SSR_ST_H_POSTINC
:
3185 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3186 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3188 case OPC1_16_SSR_ST_W
:
3189 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3191 case OPC1_16_SSR_ST_W_POSTINC
:
3192 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3193 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3198 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3202 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3205 case OPC1_16_SC_AND
:
3206 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3208 case OPC1_16_SC_BISR
:
3209 gen_helper_1arg(bisr
, const16
& 0xff);
3211 case OPC1_16_SC_LD_A
:
3212 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3214 case OPC1_16_SC_LD_W
:
3215 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3217 case OPC1_16_SC_MOV
:
3218 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3221 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3223 case OPC1_16_SC_ST_A
:
3224 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3226 case OPC1_16_SC_ST_W
:
3227 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3229 case OPC1_16_SC_SUB_A
:
3230 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3235 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3239 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3240 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3244 case OPC1_16_SLR_LD_A
:
3245 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3247 case OPC1_16_SLR_LD_A_POSTINC
:
3248 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3249 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3251 case OPC1_16_SLR_LD_BU
:
3252 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3254 case OPC1_16_SLR_LD_BU_POSTINC
:
3255 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3256 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3258 case OPC1_16_SLR_LD_H
:
3259 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3261 case OPC1_16_SLR_LD_H_POSTINC
:
3262 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3263 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3265 case OPC1_16_SLR_LD_W
:
3266 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3268 case OPC1_16_SLR_LD_W_POSTINC
:
3269 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3270 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3275 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3280 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3281 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3285 case OPC1_16_SRO_LD_A
:
3286 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3288 case OPC1_16_SRO_LD_BU
:
3289 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3291 case OPC1_16_SRO_LD_H
:
3292 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
3294 case OPC1_16_SRO_LD_W
:
3295 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3297 case OPC1_16_SRO_ST_A
:
3298 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3300 case OPC1_16_SRO_ST_B
:
3301 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3303 case OPC1_16_SRO_ST_H
:
3304 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3306 case OPC1_16_SRO_ST_W
:
3307 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3312 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
3315 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3318 case OPC2_16_SR_NOP
:
3320 case OPC2_16_SR_RET
:
3321 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3323 case OPC2_16_SR_RFE
:
3324 gen_helper_rfe(cpu_env
);
3326 ctx
->bstate
= BS_BRANCH
;
3328 case OPC2_16_SR_DEBUG
:
3329 /* raise EXCP_DEBUG */
3334 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
3340 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3341 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3344 case OPC2_16_SR_RSUB
:
3345 /* overflow only if r1 = -0x80000000 */
3346 temp
= tcg_const_i32(-0x80000000);
3348 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
3349 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3351 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3353 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3355 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3356 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3358 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3359 tcg_temp_free(temp
);
3361 case OPC2_16_SR_SAT_B
:
3362 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3364 case OPC2_16_SR_SAT_BU
:
3365 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3367 case OPC2_16_SR_SAT_H
:
3368 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3370 case OPC2_16_SR_SAT_HU
:
3371 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3376 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
3384 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3386 /* handle ADDSC.A opcode only being 6 bit long */
3387 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3388 op1
= OPC1_16_SRRS_ADDSC_A
;
3392 case OPC1_16_SRC_ADD
:
3393 case OPC1_16_SRC_ADD_A15
:
3394 case OPC1_16_SRC_ADD_15A
:
3395 case OPC1_16_SRC_ADD_A
:
3396 case OPC1_16_SRC_CADD
:
3397 case OPC1_16_SRC_CADDN
:
3398 case OPC1_16_SRC_CMOV
:
3399 case OPC1_16_SRC_CMOVN
:
3400 case OPC1_16_SRC_EQ
:
3401 case OPC1_16_SRC_LT
:
3402 case OPC1_16_SRC_MOV
:
3403 case OPC1_16_SRC_MOV_A
:
3404 case OPC1_16_SRC_SH
:
3405 case OPC1_16_SRC_SHA
:
3406 decode_src_opc(ctx
, op1
);
3409 case OPC1_16_SRR_ADD
:
3410 case OPC1_16_SRR_ADD_A15
:
3411 case OPC1_16_SRR_ADD_15A
:
3412 case OPC1_16_SRR_ADD_A
:
3413 case OPC1_16_SRR_ADDS
:
3414 case OPC1_16_SRR_AND
:
3415 case OPC1_16_SRR_CMOV
:
3416 case OPC1_16_SRR_CMOVN
:
3417 case OPC1_16_SRR_EQ
:
3418 case OPC1_16_SRR_LT
:
3419 case OPC1_16_SRR_MOV
:
3420 case OPC1_16_SRR_MOV_A
:
3421 case OPC1_16_SRR_MOV_AA
:
3422 case OPC1_16_SRR_MOV_D
:
3423 case OPC1_16_SRR_MUL
:
3424 case OPC1_16_SRR_OR
:
3425 case OPC1_16_SRR_SUB
:
3426 case OPC1_16_SRR_SUB_A15B
:
3427 case OPC1_16_SRR_SUB_15AB
:
3428 case OPC1_16_SRR_SUBS
:
3429 case OPC1_16_SRR_XOR
:
3430 decode_srr_opc(ctx
, op1
);
3433 case OPC1_16_SSR_ST_A
:
3434 case OPC1_16_SSR_ST_A_POSTINC
:
3435 case OPC1_16_SSR_ST_B
:
3436 case OPC1_16_SSR_ST_B_POSTINC
:
3437 case OPC1_16_SSR_ST_H
:
3438 case OPC1_16_SSR_ST_H_POSTINC
:
3439 case OPC1_16_SSR_ST_W
:
3440 case OPC1_16_SSR_ST_W_POSTINC
:
3441 decode_ssr_opc(ctx
, op1
);
3444 case OPC1_16_SRRS_ADDSC_A
:
3445 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
3446 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
3447 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
3448 temp
= tcg_temp_new();
3449 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
3450 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
3451 tcg_temp_free(temp
);
3454 case OPC1_16_SLRO_LD_A
:
3455 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3456 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3457 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3459 case OPC1_16_SLRO_LD_BU
:
3460 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3461 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3462 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3464 case OPC1_16_SLRO_LD_H
:
3465 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3466 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3467 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3469 case OPC1_16_SLRO_LD_W
:
3470 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3471 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3472 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3475 case OPC1_16_SB_CALL
:
3477 case OPC1_16_SB_JNZ
:
3479 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
3480 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
3483 case OPC1_16_SBC_JEQ
:
3484 case OPC1_16_SBC_JNE
:
3485 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3486 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3487 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3490 case OPC1_16_SBRN_JNZ_T
:
3491 case OPC1_16_SBRN_JZ_T
:
3492 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
3493 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
3494 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3497 case OPC1_16_SBR_JEQ
:
3498 case OPC1_16_SBR_JGEZ
:
3499 case OPC1_16_SBR_JGTZ
:
3500 case OPC1_16_SBR_JLEZ
:
3501 case OPC1_16_SBR_JLTZ
:
3502 case OPC1_16_SBR_JNE
:
3503 case OPC1_16_SBR_JNZ
:
3504 case OPC1_16_SBR_JNZ_A
:
3505 case OPC1_16_SBR_JZ
:
3506 case OPC1_16_SBR_JZ_A
:
3507 case OPC1_16_SBR_LOOP
:
3508 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3509 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3510 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3513 case OPC1_16_SC_AND
:
3514 case OPC1_16_SC_BISR
:
3515 case OPC1_16_SC_LD_A
:
3516 case OPC1_16_SC_LD_W
:
3517 case OPC1_16_SC_MOV
:
3519 case OPC1_16_SC_ST_A
:
3520 case OPC1_16_SC_ST_W
:
3521 case OPC1_16_SC_SUB_A
:
3522 decode_sc_opc(ctx
, op1
);
3525 case OPC1_16_SLR_LD_A
:
3526 case OPC1_16_SLR_LD_A_POSTINC
:
3527 case OPC1_16_SLR_LD_BU
:
3528 case OPC1_16_SLR_LD_BU_POSTINC
:
3529 case OPC1_16_SLR_LD_H
:
3530 case OPC1_16_SLR_LD_H_POSTINC
:
3531 case OPC1_16_SLR_LD_W
:
3532 case OPC1_16_SLR_LD_W_POSTINC
:
3533 decode_slr_opc(ctx
, op1
);
3536 case OPC1_16_SRO_LD_A
:
3537 case OPC1_16_SRO_LD_BU
:
3538 case OPC1_16_SRO_LD_H
:
3539 case OPC1_16_SRO_LD_W
:
3540 case OPC1_16_SRO_ST_A
:
3541 case OPC1_16_SRO_ST_B
:
3542 case OPC1_16_SRO_ST_H
:
3543 case OPC1_16_SRO_ST_W
:
3544 decode_sro_opc(ctx
, op1
);
3547 case OPC1_16_SSRO_ST_A
:
3548 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3549 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3550 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3552 case OPC1_16_SSRO_ST_B
:
3553 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3554 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3555 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3557 case OPC1_16_SSRO_ST_H
:
3558 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3559 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3560 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3562 case OPC1_16_SSRO_ST_W
:
3563 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3564 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3565 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3568 case OPCM_16_SR_SYSTEM
:
3569 decode_sr_system(env
, ctx
);
3571 case OPCM_16_SR_ACCU
:
3572 decode_sr_accu(env
, ctx
);
3575 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3576 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
3578 case OPC1_16_SR_NOT
:
3579 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3580 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3586 * 32 bit instructions
3590 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
3597 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3598 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3599 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3601 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3604 case OPC2_32_ABS_LD_A
:
3605 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3607 case OPC2_32_ABS_LD_D
:
3608 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3610 case OPC2_32_ABS_LD_DA
:
3611 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3613 case OPC2_32_ABS_LD_W
:
3614 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3618 tcg_temp_free(temp
);
3621 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
3628 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3629 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3630 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3632 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3635 case OPC2_32_ABS_LD_B
:
3636 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
3638 case OPC2_32_ABS_LD_BU
:
3639 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3641 case OPC2_32_ABS_LD_H
:
3642 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
3644 case OPC2_32_ABS_LD_HU
:
3645 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3649 tcg_temp_free(temp
);
3652 static void decode_abs_ldst_swap(CPUTriCoreState
*env
, DisasContext
*ctx
)
3659 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3660 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3661 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3663 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3666 case OPC2_32_ABS_LDMST
:
3667 gen_ldmst(ctx
, r1
, temp
);
3669 case OPC2_32_ABS_SWAP_W
:
3670 gen_swap(ctx
, r1
, temp
);
3674 tcg_temp_free(temp
);
3677 static void decode_abs_ldst_context(CPUTriCoreState
*env
, DisasContext
*ctx
)
3682 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3683 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3686 case OPC2_32_ABS_LDLCX
:
3687 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
3689 case OPC2_32_ABS_LDUCX
:
3690 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
3692 case OPC2_32_ABS_STLCX
:
3693 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
3695 case OPC2_32_ABS_STUCX
:
3696 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
3701 static void decode_abs_store(CPUTriCoreState
*env
, DisasContext
*ctx
)
3708 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3709 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3710 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3712 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3715 case OPC2_32_ABS_ST_A
:
3716 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3718 case OPC2_32_ABS_ST_D
:
3719 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3721 case OPC2_32_ABS_ST_DA
:
3722 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3724 case OPC2_32_ABS_ST_W
:
3725 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3729 tcg_temp_free(temp
);
3732 static void decode_abs_storeb_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
3739 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3740 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3741 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3743 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3746 case OPC2_32_ABS_ST_B
:
3747 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3749 case OPC2_32_ABS_ST_H
:
3750 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3753 tcg_temp_free(temp
);
3758 static void decode_bit_andacc(CPUTriCoreState
*env
, DisasContext
*ctx
)
3764 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3765 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3766 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3767 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3768 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3769 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3773 case OPC2_32_BIT_AND_AND_T
:
3774 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3775 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
3777 case OPC2_32_BIT_AND_ANDN_T
:
3778 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3779 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
3781 case OPC2_32_BIT_AND_NOR_T
:
3782 if (TCG_TARGET_HAS_andc_i32
) {
3783 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3784 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
3786 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3787 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
3790 case OPC2_32_BIT_AND_OR_T
:
3791 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3792 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
3797 static void decode_bit_logical_t(CPUTriCoreState
*env
, DisasContext
*ctx
)
3802 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3803 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3804 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3805 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3806 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3807 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3810 case OPC2_32_BIT_AND_T
:
3811 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3812 pos1
, pos2
, &tcg_gen_and_tl
);
3814 case OPC2_32_BIT_ANDN_T
:
3815 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3816 pos1
, pos2
, &tcg_gen_andc_tl
);
3818 case OPC2_32_BIT_NOR_T
:
3819 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3820 pos1
, pos2
, &tcg_gen_nor_tl
);
3822 case OPC2_32_BIT_OR_T
:
3823 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3824 pos1
, pos2
, &tcg_gen_or_tl
);
3829 static void decode_bit_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
3835 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3836 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3837 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3838 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3839 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3840 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3842 temp
= tcg_temp_new();
3844 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
3845 if (op2
== OPC2_32_BIT_INSN_T
) {
3846 tcg_gen_not_tl(temp
, temp
);
3848 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
3849 tcg_temp_free(temp
);
3852 static void decode_bit_logical_t2(CPUTriCoreState
*env
, DisasContext
*ctx
)
3859 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3860 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3861 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3862 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3863 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3864 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3867 case OPC2_32_BIT_NAND_T
:
3868 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3869 pos1
, pos2
, &tcg_gen_nand_tl
);
3871 case OPC2_32_BIT_ORN_T
:
3872 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3873 pos1
, pos2
, &tcg_gen_orc_tl
);
3875 case OPC2_32_BIT_XNOR_T
:
3876 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3877 pos1
, pos2
, &tcg_gen_eqv_tl
);
3879 case OPC2_32_BIT_XOR_T
:
3880 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3881 pos1
, pos2
, &tcg_gen_xor_tl
);
3886 static void decode_bit_orand(CPUTriCoreState
*env
, DisasContext
*ctx
)
3893 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3894 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3895 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3896 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3897 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3898 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3901 case OPC2_32_BIT_OR_AND_T
:
3902 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3903 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
3905 case OPC2_32_BIT_OR_ANDN_T
:
3906 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3907 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
3909 case OPC2_32_BIT_OR_NOR_T
:
3910 if (TCG_TARGET_HAS_orc_i32
) {
3911 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3912 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
3914 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3915 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
3918 case OPC2_32_BIT_OR_OR_T
:
3919 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3920 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
3925 static void decode_bit_sh_logic1(CPUTriCoreState
*env
, DisasContext
*ctx
)
3932 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3933 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3934 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3935 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3936 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3937 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3939 temp
= tcg_temp_new();
3942 case OPC2_32_BIT_SH_AND_T
:
3943 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3944 pos1
, pos2
, &tcg_gen_and_tl
);
3946 case OPC2_32_BIT_SH_ANDN_T
:
3947 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3948 pos1
, pos2
, &tcg_gen_andc_tl
);
3950 case OPC2_32_BIT_SH_NOR_T
:
3951 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3952 pos1
, pos2
, &tcg_gen_nor_tl
);
3954 case OPC2_32_BIT_SH_OR_T
:
3955 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3956 pos1
, pos2
, &tcg_gen_or_tl
);
3959 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
3960 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
3961 tcg_temp_free(temp
);
3964 static void decode_bit_sh_logic2(CPUTriCoreState
*env
, DisasContext
*ctx
)
3971 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3972 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3973 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3974 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3975 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3976 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3978 temp
= tcg_temp_new();
3981 case OPC2_32_BIT_SH_NAND_T
:
3982 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
3983 pos1
, pos2
, &tcg_gen_nand_tl
);
3985 case OPC2_32_BIT_SH_ORN_T
:
3986 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3987 pos1
, pos2
, &tcg_gen_orc_tl
);
3989 case OPC2_32_BIT_SH_XNOR_T
:
3990 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3991 pos1
, pos2
, &tcg_gen_eqv_tl
);
3993 case OPC2_32_BIT_SH_XOR_T
:
3994 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3995 pos1
, pos2
, &tcg_gen_xor_tl
);
3998 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
3999 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4000 tcg_temp_free(temp
);
4006 static void decode_bo_addrmode_post_pre_base(CPUTriCoreState
*env
,
4014 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4015 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4016 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4017 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4020 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
4021 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
4022 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
4023 /* instruction to access the cache */
4025 case OPC2_32_BO_CACHEA_WI_POSTINC
:
4026 case OPC2_32_BO_CACHEA_W_POSTINC
:
4027 case OPC2_32_BO_CACHEA_I_POSTINC
:
4028 /* instruction to access the cache, but we still need to handle
4029 the addressing mode */
4030 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
4032 case OPC2_32_BO_CACHEA_WI_PREINC
:
4033 case OPC2_32_BO_CACHEA_W_PREINC
:
4034 case OPC2_32_BO_CACHEA_I_PREINC
:
4035 /* instruction to access the cache, but we still need to handle
4036 the addressing mode */
4037 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
4039 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
4040 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
4041 /* TODO: Raise illegal opcode trap,
4042 if !tricore_feature(TRICORE_FEATURE_131) */
4044 case OPC2_32_BO_CACHEI_W_POSTINC
:
4045 case OPC2_32_BO_CACHEI_WI_POSTINC
:
4046 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
4047 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
4048 } /* TODO: else raise illegal opcode trap */
4050 case OPC2_32_BO_CACHEI_W_PREINC
:
4051 case OPC2_32_BO_CACHEI_WI_PREINC
:
4052 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
4053 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
4054 } /* TODO: else raise illegal opcode trap */
4056 case OPC2_32_BO_ST_A_SHORTOFF
:
4057 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4059 case OPC2_32_BO_ST_A_POSTINC
:
4060 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4062 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4064 case OPC2_32_BO_ST_A_PREINC
:
4065 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4067 case OPC2_32_BO_ST_B_SHORTOFF
:
4068 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4070 case OPC2_32_BO_ST_B_POSTINC
:
4071 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4073 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4075 case OPC2_32_BO_ST_B_PREINC
:
4076 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4078 case OPC2_32_BO_ST_D_SHORTOFF
:
4079 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4082 case OPC2_32_BO_ST_D_POSTINC
:
4083 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4084 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4086 case OPC2_32_BO_ST_D_PREINC
:
4087 temp
= tcg_temp_new();
4088 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4089 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4090 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4091 tcg_temp_free(temp
);
4093 case OPC2_32_BO_ST_DA_SHORTOFF
:
4094 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4097 case OPC2_32_BO_ST_DA_POSTINC
:
4098 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4099 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4101 case OPC2_32_BO_ST_DA_PREINC
:
4102 temp
= tcg_temp_new();
4103 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4104 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4105 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4106 tcg_temp_free(temp
);
4108 case OPC2_32_BO_ST_H_SHORTOFF
:
4109 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4111 case OPC2_32_BO_ST_H_POSTINC
:
4112 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4114 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4116 case OPC2_32_BO_ST_H_PREINC
:
4117 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4119 case OPC2_32_BO_ST_Q_SHORTOFF
:
4120 temp
= tcg_temp_new();
4121 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4122 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4123 tcg_temp_free(temp
);
4125 case OPC2_32_BO_ST_Q_POSTINC
:
4126 temp
= tcg_temp_new();
4127 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4128 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
4130 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4131 tcg_temp_free(temp
);
4133 case OPC2_32_BO_ST_Q_PREINC
:
4134 temp
= tcg_temp_new();
4135 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4136 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4137 tcg_temp_free(temp
);
4139 case OPC2_32_BO_ST_W_SHORTOFF
:
4140 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4142 case OPC2_32_BO_ST_W_POSTINC
:
4143 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4145 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4147 case OPC2_32_BO_ST_W_PREINC
:
4148 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4153 static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState
*env
,
4159 TCGv temp
, temp2
, temp3
;
4161 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4162 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4163 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4164 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4166 temp
= tcg_temp_new();
4167 temp2
= tcg_temp_new();
4168 temp3
= tcg_const_i32(off10
);
4170 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4171 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4174 case OPC2_32_BO_CACHEA_WI_BR
:
4175 case OPC2_32_BO_CACHEA_W_BR
:
4176 case OPC2_32_BO_CACHEA_I_BR
:
4177 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4179 case OPC2_32_BO_CACHEA_WI_CIRC
:
4180 case OPC2_32_BO_CACHEA_W_CIRC
:
4181 case OPC2_32_BO_CACHEA_I_CIRC
:
4182 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4184 case OPC2_32_BO_ST_A_BR
:
4185 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4186 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4188 case OPC2_32_BO_ST_A_CIRC
:
4189 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4190 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4192 case OPC2_32_BO_ST_B_BR
:
4193 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4194 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4196 case OPC2_32_BO_ST_B_CIRC
:
4197 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4198 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4200 case OPC2_32_BO_ST_D_BR
:
4201 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4202 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4204 case OPC2_32_BO_ST_D_CIRC
:
4205 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4206 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4207 tcg_gen_addi_tl(temp
, temp
, 4);
4208 tcg_gen_rem_tl(temp
, temp
, temp2
);
4209 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4210 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4211 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4213 case OPC2_32_BO_ST_DA_BR
:
4214 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4215 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4217 case OPC2_32_BO_ST_DA_CIRC
:
4218 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4219 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4220 tcg_gen_addi_tl(temp
, temp
, 4);
4221 tcg_gen_rem_tl(temp
, temp
, temp2
);
4222 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4223 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4224 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4226 case OPC2_32_BO_ST_H_BR
:
4227 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4228 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4230 case OPC2_32_BO_ST_H_CIRC
:
4231 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4232 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4234 case OPC2_32_BO_ST_Q_BR
:
4235 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4236 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4237 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4239 case OPC2_32_BO_ST_Q_CIRC
:
4240 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4241 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4242 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4244 case OPC2_32_BO_ST_W_BR
:
4245 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4246 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4248 case OPC2_32_BO_ST_W_CIRC
:
4249 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4250 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4253 tcg_temp_free(temp
);
4254 tcg_temp_free(temp2
);
4255 tcg_temp_free(temp3
);
4258 static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState
*env
,
4266 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4267 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4268 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4269 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4272 case OPC2_32_BO_LD_A_SHORTOFF
:
4273 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4275 case OPC2_32_BO_LD_A_POSTINC
:
4276 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4278 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4280 case OPC2_32_BO_LD_A_PREINC
:
4281 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4283 case OPC2_32_BO_LD_B_SHORTOFF
:
4284 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4286 case OPC2_32_BO_LD_B_POSTINC
:
4287 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4289 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4291 case OPC2_32_BO_LD_B_PREINC
:
4292 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4294 case OPC2_32_BO_LD_BU_SHORTOFF
:
4295 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4297 case OPC2_32_BO_LD_BU_POSTINC
:
4298 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4300 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4302 case OPC2_32_BO_LD_BU_PREINC
:
4303 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4305 case OPC2_32_BO_LD_D_SHORTOFF
:
4306 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4309 case OPC2_32_BO_LD_D_POSTINC
:
4310 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4311 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4313 case OPC2_32_BO_LD_D_PREINC
:
4314 temp
= tcg_temp_new();
4315 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4316 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4317 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4318 tcg_temp_free(temp
);
4320 case OPC2_32_BO_LD_DA_SHORTOFF
:
4321 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4324 case OPC2_32_BO_LD_DA_POSTINC
:
4325 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4326 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4328 case OPC2_32_BO_LD_DA_PREINC
:
4329 temp
= tcg_temp_new();
4330 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4331 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4332 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4333 tcg_temp_free(temp
);
4335 case OPC2_32_BO_LD_H_SHORTOFF
:
4336 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4338 case OPC2_32_BO_LD_H_POSTINC
:
4339 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4341 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4343 case OPC2_32_BO_LD_H_PREINC
:
4344 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4346 case OPC2_32_BO_LD_HU_SHORTOFF
:
4347 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4349 case OPC2_32_BO_LD_HU_POSTINC
:
4350 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4352 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4354 case OPC2_32_BO_LD_HU_PREINC
:
4355 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4357 case OPC2_32_BO_LD_Q_SHORTOFF
:
4358 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4359 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4361 case OPC2_32_BO_LD_Q_POSTINC
:
4362 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4364 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4365 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4367 case OPC2_32_BO_LD_Q_PREINC
:
4368 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4369 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4371 case OPC2_32_BO_LD_W_SHORTOFF
:
4372 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4374 case OPC2_32_BO_LD_W_POSTINC
:
4375 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4377 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4379 case OPC2_32_BO_LD_W_PREINC
:
4380 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4385 static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState
*env
,
4392 TCGv temp
, temp2
, temp3
;
4394 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4395 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4396 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4397 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4399 temp
= tcg_temp_new();
4400 temp2
= tcg_temp_new();
4401 temp3
= tcg_const_i32(off10
);
4403 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4404 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4408 case OPC2_32_BO_LD_A_BR
:
4409 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4410 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4412 case OPC2_32_BO_LD_A_CIRC
:
4413 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4414 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4416 case OPC2_32_BO_LD_B_BR
:
4417 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4418 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4420 case OPC2_32_BO_LD_B_CIRC
:
4421 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4422 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4424 case OPC2_32_BO_LD_BU_BR
:
4425 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4426 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4428 case OPC2_32_BO_LD_BU_CIRC
:
4429 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4430 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4432 case OPC2_32_BO_LD_D_BR
:
4433 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4434 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4436 case OPC2_32_BO_LD_D_CIRC
:
4437 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4438 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4439 tcg_gen_addi_tl(temp
, temp
, 4);
4440 tcg_gen_rem_tl(temp
, temp
, temp2
);
4441 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4442 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4443 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4445 case OPC2_32_BO_LD_DA_BR
:
4446 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4447 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4449 case OPC2_32_BO_LD_DA_CIRC
:
4450 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4451 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4452 tcg_gen_addi_tl(temp
, temp
, 4);
4453 tcg_gen_rem_tl(temp
, temp
, temp2
);
4454 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4455 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4456 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4458 case OPC2_32_BO_LD_H_BR
:
4459 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4460 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4462 case OPC2_32_BO_LD_H_CIRC
:
4463 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4464 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4466 case OPC2_32_BO_LD_HU_BR
:
4467 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4468 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4470 case OPC2_32_BO_LD_HU_CIRC
:
4471 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4472 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4474 case OPC2_32_BO_LD_Q_BR
:
4475 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4476 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4477 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4479 case OPC2_32_BO_LD_Q_CIRC
:
4480 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4481 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4482 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4484 case OPC2_32_BO_LD_W_BR
:
4485 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4486 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4488 case OPC2_32_BO_LD_W_CIRC
:
4489 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4490 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4493 tcg_temp_free(temp
);
4494 tcg_temp_free(temp2
);
4495 tcg_temp_free(temp3
);
4498 static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState
*env
,
4507 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4508 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4509 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4510 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4513 temp
= tcg_temp_new();
4514 temp2
= tcg_temp_new();
4517 case OPC2_32_BO_LDLCX_SHORTOFF
:
4518 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4519 gen_helper_ldlcx(cpu_env
, temp
);
4521 case OPC2_32_BO_LDMST_SHORTOFF
:
4522 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4523 gen_ldmst(ctx
, r1
, temp
);
4525 case OPC2_32_BO_LDMST_POSTINC
:
4526 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4527 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4529 case OPC2_32_BO_LDMST_PREINC
:
4530 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4531 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4533 case OPC2_32_BO_LDUCX_SHORTOFF
:
4534 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4535 gen_helper_lducx(cpu_env
, temp
);
4537 case OPC2_32_BO_LEA_SHORTOFF
:
4538 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
4540 case OPC2_32_BO_STLCX_SHORTOFF
:
4541 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4542 gen_helper_stlcx(cpu_env
, temp
);
4544 case OPC2_32_BO_STUCX_SHORTOFF
:
4545 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4546 gen_helper_stucx(cpu_env
, temp
);
4548 case OPC2_32_BO_SWAP_W_SHORTOFF
:
4549 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4550 gen_swap(ctx
, r1
, temp
);
4552 case OPC2_32_BO_SWAP_W_POSTINC
:
4553 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4554 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4556 case OPC2_32_BO_SWAP_W_PREINC
:
4557 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4558 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4561 tcg_temp_free(temp
);
4562 tcg_temp_free(temp2
);
4565 static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState
*env
,
4572 TCGv temp
, temp2
, temp3
;
4574 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4575 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4576 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4577 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4579 temp
= tcg_temp_new();
4580 temp2
= tcg_temp_new();
4581 temp3
= tcg_const_i32(off10
);
4583 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4584 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4587 case OPC2_32_BO_LDMST_BR
:
4588 gen_ldmst(ctx
, r1
, temp2
);
4589 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4591 case OPC2_32_BO_LDMST_CIRC
:
4592 gen_ldmst(ctx
, r1
, temp2
);
4593 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4595 case OPC2_32_BO_SWAP_W_BR
:
4596 gen_swap(ctx
, r1
, temp2
);
4597 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4599 case OPC2_32_BO_SWAP_W_CIRC
:
4600 gen_swap(ctx
, r1
, temp2
);
4601 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4604 tcg_temp_free(temp
);
4605 tcg_temp_free(temp2
);
4606 tcg_temp_free(temp3
);
4609 static void decode_bol_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int32_t op1
)
4615 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
4616 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
4617 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
4620 case OPC1_32_BOL_LD_A_LONGOFF
:
4621 temp
= tcg_temp_new();
4622 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4623 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4624 tcg_temp_free(temp
);
4626 case OPC1_32_BOL_LD_W_LONGOFF
:
4627 temp
= tcg_temp_new();
4628 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4629 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4630 tcg_temp_free(temp
);
4632 case OPC1_32_BOL_LEA_LONGOFF
:
4633 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
4635 case OPC1_32_BOL_ST_A_LONGOFF
:
4636 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4637 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4639 /* raise illegal opcode trap */
4642 case OPC1_32_BOL_ST_W_LONGOFF
:
4643 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4645 case OPC1_32_BOL_LD_B_LONGOFF
:
4646 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4647 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4649 /* raise illegal opcode trap */
4652 case OPC1_32_BOL_LD_BU_LONGOFF
:
4653 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4654 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
4656 /* raise illegal opcode trap */
4659 case OPC1_32_BOL_LD_H_LONGOFF
:
4660 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4661 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4663 /* raise illegal opcode trap */
4666 case OPC1_32_BOL_LD_HU_LONGOFF
:
4667 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4668 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
4670 /* raise illegal opcode trap */
4673 case OPC1_32_BOL_ST_B_LONGOFF
:
4674 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4675 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4677 /* raise illegal opcode trap */
4680 case OPC1_32_BOL_ST_H_LONGOFF
:
4681 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4682 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4684 /* raise illegal opcode trap */
4691 static void decode_rc_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
4698 r2
= MASK_OP_RC_D(ctx
->opcode
);
4699 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4700 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4701 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4703 temp
= tcg_temp_new();
4706 case OPC2_32_RC_AND
:
4707 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4709 case OPC2_32_RC_ANDN
:
4710 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4712 case OPC2_32_RC_NAND
:
4713 tcg_gen_movi_tl(temp
, const9
);
4714 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4716 case OPC2_32_RC_NOR
:
4717 tcg_gen_movi_tl(temp
, const9
);
4718 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4721 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4723 case OPC2_32_RC_ORN
:
4724 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4727 const9
= sextract32(const9
, 0, 6);
4728 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4730 case OPC2_32_RC_SH_H
:
4731 const9
= sextract32(const9
, 0, 5);
4732 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4734 case OPC2_32_RC_SHA
:
4735 const9
= sextract32(const9
, 0, 6);
4736 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4738 case OPC2_32_RC_SHA_H
:
4739 const9
= sextract32(const9
, 0, 5);
4740 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4742 case OPC2_32_RC_SHAS
:
4743 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4745 case OPC2_32_RC_XNOR
:
4746 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4747 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
4749 case OPC2_32_RC_XOR
:
4750 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4753 tcg_temp_free(temp
);
4756 static void decode_rc_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
4764 r2
= MASK_OP_RC_D(ctx
->opcode
);
4765 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4766 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
4768 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4770 temp
= tcg_temp_new();
4773 case OPC2_32_RC_ABSDIF
:
4774 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4776 case OPC2_32_RC_ABSDIFS
:
4777 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4779 case OPC2_32_RC_ADD
:
4780 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4782 case OPC2_32_RC_ADDC
:
4783 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4785 case OPC2_32_RC_ADDS
:
4786 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4788 case OPC2_32_RC_ADDS_U
:
4789 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4791 case OPC2_32_RC_ADDX
:
4792 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4794 case OPC2_32_RC_AND_EQ
:
4795 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4796 const9
, &tcg_gen_and_tl
);
4798 case OPC2_32_RC_AND_GE
:
4799 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4800 const9
, &tcg_gen_and_tl
);
4802 case OPC2_32_RC_AND_GE_U
:
4803 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4804 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4805 const9
, &tcg_gen_and_tl
);
4807 case OPC2_32_RC_AND_LT
:
4808 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4809 const9
, &tcg_gen_and_tl
);
4811 case OPC2_32_RC_AND_LT_U
:
4812 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4813 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4814 const9
, &tcg_gen_and_tl
);
4816 case OPC2_32_RC_AND_NE
:
4817 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4818 const9
, &tcg_gen_and_tl
);
4821 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4823 case OPC2_32_RC_EQANY_B
:
4824 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4826 case OPC2_32_RC_EQANY_H
:
4827 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4830 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4832 case OPC2_32_RC_GE_U
:
4833 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4834 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4837 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4839 case OPC2_32_RC_LT_U
:
4840 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4841 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4843 case OPC2_32_RC_MAX
:
4844 tcg_gen_movi_tl(temp
, const9
);
4845 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4846 cpu_gpr_d
[r1
], temp
);
4848 case OPC2_32_RC_MAX_U
:
4849 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
4850 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4851 cpu_gpr_d
[r1
], temp
);
4853 case OPC2_32_RC_MIN
:
4854 tcg_gen_movi_tl(temp
, const9
);
4855 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4856 cpu_gpr_d
[r1
], temp
);
4858 case OPC2_32_RC_MIN_U
:
4859 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
4860 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4861 cpu_gpr_d
[r1
], temp
);
4864 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4866 case OPC2_32_RC_OR_EQ
:
4867 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4868 const9
, &tcg_gen_or_tl
);
4870 case OPC2_32_RC_OR_GE
:
4871 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4872 const9
, &tcg_gen_or_tl
);
4874 case OPC2_32_RC_OR_GE_U
:
4875 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4876 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4877 const9
, &tcg_gen_or_tl
);
4879 case OPC2_32_RC_OR_LT
:
4880 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4881 const9
, &tcg_gen_or_tl
);
4883 case OPC2_32_RC_OR_LT_U
:
4884 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4885 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4886 const9
, &tcg_gen_or_tl
);
4888 case OPC2_32_RC_OR_NE
:
4889 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4890 const9
, &tcg_gen_or_tl
);
4892 case OPC2_32_RC_RSUB
:
4893 tcg_gen_movi_tl(temp
, const9
);
4894 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
4896 case OPC2_32_RC_RSUBS
:
4897 tcg_gen_movi_tl(temp
, const9
);
4898 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
4900 case OPC2_32_RC_RSUBS_U
:
4901 tcg_gen_movi_tl(temp
, const9
);
4902 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
4904 case OPC2_32_RC_SH_EQ
:
4905 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4907 case OPC2_32_RC_SH_GE
:
4908 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4910 case OPC2_32_RC_SH_GE_U
:
4911 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4912 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4914 case OPC2_32_RC_SH_LT
:
4915 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4917 case OPC2_32_RC_SH_LT_U
:
4918 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4919 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4921 case OPC2_32_RC_SH_NE
:
4922 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4924 case OPC2_32_RC_XOR_EQ
:
4925 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4926 const9
, &tcg_gen_xor_tl
);
4928 case OPC2_32_RC_XOR_GE
:
4929 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4930 const9
, &tcg_gen_xor_tl
);
4932 case OPC2_32_RC_XOR_GE_U
:
4933 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4934 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4935 const9
, &tcg_gen_xor_tl
);
4937 case OPC2_32_RC_XOR_LT
:
4938 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4939 const9
, &tcg_gen_xor_tl
);
4941 case OPC2_32_RC_XOR_LT_U
:
4942 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4943 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4944 const9
, &tcg_gen_xor_tl
);
4946 case OPC2_32_RC_XOR_NE
:
4947 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4948 const9
, &tcg_gen_xor_tl
);
4951 tcg_temp_free(temp
);
4954 static void decode_rc_serviceroutine(CPUTriCoreState
*env
, DisasContext
*ctx
)
4959 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4960 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4963 case OPC2_32_RC_BISR
:
4964 gen_helper_1arg(bisr
, const9
);
4966 case OPC2_32_RC_SYSCALL
:
4967 /* TODO: Add exception generation */
4972 static void decode_rc_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
4978 r2
= MASK_OP_RC_D(ctx
->opcode
);
4979 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4980 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
4982 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4985 case OPC2_32_RC_MUL_32
:
4986 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4988 case OPC2_32_RC_MUL_64
:
4989 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
4991 case OPC2_32_RC_MULS_32
:
4992 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4994 case OPC2_32_RC_MUL_U_64
:
4995 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4996 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
4998 case OPC2_32_RC_MULS_U_32
:
4999 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5000 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5006 static void decode_rcpw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
5010 int32_t pos
, width
, const4
;
5014 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
5015 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
5016 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
5017 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
5018 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
5019 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
5022 case OPC2_32_RCPW_IMASK
:
5023 /* if pos + width > 31 undefined result */
5024 if (pos
+ width
<= 31) {
5025 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
5026 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
5029 case OPC2_32_RCPW_INSERT
:
5030 /* if pos + width > 32 undefined result */
5031 if (pos
+ width
<= 32) {
5032 temp
= tcg_const_i32(const4
);
5033 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
5034 tcg_temp_free(temp
);
5042 static void decode_rcrw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
5046 int32_t width
, const4
;
5048 TCGv temp
, temp2
, temp3
;
5050 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
5051 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
5052 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
5053 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
5054 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
5055 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
5057 temp
= tcg_temp_new();
5058 temp2
= tcg_temp_new();
5061 case OPC2_32_RCRW_IMASK
:
5062 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r4
], 0x1f);
5063 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
5064 tcg_gen_shl_tl(cpu_gpr_d
[r3
+ 1], temp2
, temp
);
5065 tcg_gen_movi_tl(temp2
, const4
);
5066 tcg_gen_shl_tl(cpu_gpr_d
[r3
], temp2
, temp
);
5068 case OPC2_32_RCRW_INSERT
:
5069 temp3
= tcg_temp_new();
5071 tcg_gen_movi_tl(temp
, width
);
5072 tcg_gen_movi_tl(temp2
, const4
);
5073 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r4
], 0x1f);
5074 gen_insert(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
5076 tcg_temp_free(temp3
);
5079 tcg_temp_free(temp
);
5080 tcg_temp_free(temp2
);
5085 static void decode_rcr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
5093 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5094 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5095 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5096 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5097 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5100 case OPC2_32_RCR_CADD
:
5101 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
5104 case OPC2_32_RCR_CADDN
:
5105 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
5108 case OPC2_32_RCR_SEL
:
5109 temp
= tcg_const_i32(0);
5110 temp2
= tcg_const_i32(const9
);
5111 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5112 cpu_gpr_d
[r1
], temp2
);
5113 tcg_temp_free(temp
);
5114 tcg_temp_free(temp2
);
5116 case OPC2_32_RCR_SELN
:
5117 temp
= tcg_const_i32(0);
5118 temp2
= tcg_const_i32(const9
);
5119 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5120 cpu_gpr_d
[r1
], temp2
);
5121 tcg_temp_free(temp
);
5122 tcg_temp_free(temp2
);
5127 static void decode_rcr_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
5134 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5135 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5136 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5137 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5138 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5141 case OPC2_32_RCR_MADD_32
:
5142 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5144 case OPC2_32_RCR_MADD_64
:
5145 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5146 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5148 case OPC2_32_RCR_MADDS_32
:
5149 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5151 case OPC2_32_RCR_MADDS_64
:
5152 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5153 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5155 case OPC2_32_RCR_MADD_U_64
:
5156 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5157 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5158 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5160 case OPC2_32_RCR_MADDS_U_32
:
5161 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5162 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5164 case OPC2_32_RCR_MADDS_U_64
:
5165 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5166 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5167 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5172 static void decode_rcr_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
5179 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5180 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5181 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5182 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5183 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5186 case OPC2_32_RCR_MSUB_32
:
5187 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5189 case OPC2_32_RCR_MSUB_64
:
5190 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5191 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5193 case OPC2_32_RCR_MSUBS_32
:
5194 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5196 case OPC2_32_RCR_MSUBS_64
:
5197 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5198 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5200 case OPC2_32_RCR_MSUB_U_64
:
5201 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5202 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5203 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5205 case OPC2_32_RCR_MSUBS_U_32
:
5206 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5207 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5209 case OPC2_32_RCR_MSUBS_U_64
:
5210 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5211 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5212 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5219 static void decode_rlc_opc(CPUTriCoreState
*env
, DisasContext
*ctx
,
5225 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
5226 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
5227 r2
= MASK_OP_RLC_D(ctx
->opcode
);
5230 case OPC1_32_RLC_ADDI
:
5231 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
5233 case OPC1_32_RLC_ADDIH
:
5234 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
5236 case OPC1_32_RLC_ADDIH_A
:
5237 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
5239 case OPC1_32_RLC_MFCR
:
5240 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5241 gen_mfcr(env
, cpu_gpr_d
[r2
], const16
);
5243 case OPC1_32_RLC_MOV
:
5244 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5246 case OPC1_32_RLC_MOV_64
:
5247 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5248 if ((r2
& 0x1) != 0) {
5249 /* TODO: raise OPD trap */
5251 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5252 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
5254 /* TODO: raise illegal opcode trap */
5257 case OPC1_32_RLC_MOV_U
:
5258 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5259 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5261 case OPC1_32_RLC_MOV_H
:
5262 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
5264 case OPC1_32_RLC_MOVH_A
:
5265 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
5267 case OPC1_32_RLC_MTCR
:
5268 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5269 gen_mtcr(env
, ctx
, cpu_gpr_d
[r1
], const16
);
5275 static void decode_rr_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
5280 r3
= MASK_OP_RR_D(ctx
->opcode
);
5281 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5282 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5283 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5286 case OPC2_32_RR_ABS
:
5287 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5289 case OPC2_32_RR_ABS_B
:
5290 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5292 case OPC2_32_RR_ABS_H
:
5293 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5295 case OPC2_32_RR_ABSDIF
:
5296 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5298 case OPC2_32_RR_ABSDIF_B
:
5299 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5302 case OPC2_32_RR_ABSDIF_H
:
5303 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5306 case OPC2_32_RR_ABSDIFS
:
5307 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5310 case OPC2_32_RR_ABSDIFS_H
:
5311 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5314 case OPC2_32_RR_ABSS
:
5315 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5317 case OPC2_32_RR_ABSS_H
:
5318 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5320 case OPC2_32_RR_ADD
:
5321 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5323 case OPC2_32_RR_ADD_B
:
5324 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5326 case OPC2_32_RR_ADD_H
:
5327 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5329 case OPC2_32_RR_ADDC
:
5330 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5332 case OPC2_32_RR_ADDS
:
5333 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5335 case OPC2_32_RR_ADDS_H
:
5336 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5339 case OPC2_32_RR_ADDS_HU
:
5340 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5343 case OPC2_32_RR_ADDS_U
:
5344 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5347 case OPC2_32_RR_ADDX
:
5348 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5350 case OPC2_32_RR_AND_EQ
:
5351 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5352 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5354 case OPC2_32_RR_AND_GE
:
5355 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5356 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5358 case OPC2_32_RR_AND_GE_U
:
5359 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5360 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5362 case OPC2_32_RR_AND_LT
:
5363 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5364 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5366 case OPC2_32_RR_AND_LT_U
:
5367 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5368 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5370 case OPC2_32_RR_AND_NE
:
5371 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5372 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5375 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5378 case OPC2_32_RR_EQ_B
:
5379 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5381 case OPC2_32_RR_EQ_H
:
5382 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5384 case OPC2_32_RR_EQ_W
:
5385 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5387 case OPC2_32_RR_EQANY_B
:
5388 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5390 case OPC2_32_RR_EQANY_H
:
5391 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5394 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5397 case OPC2_32_RR_GE_U
:
5398 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5402 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5405 case OPC2_32_RR_LT_U
:
5406 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5409 case OPC2_32_RR_LT_B
:
5410 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5412 case OPC2_32_RR_LT_BU
:
5413 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5415 case OPC2_32_RR_LT_H
:
5416 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5418 case OPC2_32_RR_LT_HU
:
5419 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5421 case OPC2_32_RR_LT_W
:
5422 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5424 case OPC2_32_RR_LT_WU
:
5425 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5427 case OPC2_32_RR_MAX
:
5428 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5429 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5431 case OPC2_32_RR_MAX_U
:
5432 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5433 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5435 case OPC2_32_RR_MAX_B
:
5436 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5438 case OPC2_32_RR_MAX_BU
:
5439 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5441 case OPC2_32_RR_MAX_H
:
5442 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5444 case OPC2_32_RR_MAX_HU
:
5445 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5447 case OPC2_32_RR_MIN
:
5448 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5449 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5451 case OPC2_32_RR_MIN_U
:
5452 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5453 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5455 case OPC2_32_RR_MIN_B
:
5456 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5458 case OPC2_32_RR_MIN_BU
:
5459 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5461 case OPC2_32_RR_MIN_H
:
5462 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5464 case OPC2_32_RR_MIN_HU
:
5465 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5467 case OPC2_32_RR_MOV
:
5468 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5471 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5474 case OPC2_32_RR_OR_EQ
:
5475 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5476 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5478 case OPC2_32_RR_OR_GE
:
5479 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5480 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5482 case OPC2_32_RR_OR_GE_U
:
5483 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5484 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5486 case OPC2_32_RR_OR_LT
:
5487 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5488 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5490 case OPC2_32_RR_OR_LT_U
:
5491 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5492 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5494 case OPC2_32_RR_OR_NE
:
5495 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5496 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5498 case OPC2_32_RR_SAT_B
:
5499 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
5501 case OPC2_32_RR_SAT_BU
:
5502 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
5504 case OPC2_32_RR_SAT_H
:
5505 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
5507 case OPC2_32_RR_SAT_HU
:
5508 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
5510 case OPC2_32_RR_SH_EQ
:
5511 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5514 case OPC2_32_RR_SH_GE
:
5515 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5518 case OPC2_32_RR_SH_GE_U
:
5519 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5522 case OPC2_32_RR_SH_LT
:
5523 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5526 case OPC2_32_RR_SH_LT_U
:
5527 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5530 case OPC2_32_RR_SH_NE
:
5531 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5534 case OPC2_32_RR_SUB
:
5535 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5537 case OPC2_32_RR_SUB_B
:
5538 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5540 case OPC2_32_RR_SUB_H
:
5541 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5543 case OPC2_32_RR_SUBC
:
5544 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5546 case OPC2_32_RR_SUBS
:
5547 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5549 case OPC2_32_RR_SUBS_U
:
5550 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5552 case OPC2_32_RR_SUBS_H
:
5553 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5556 case OPC2_32_RR_SUBS_HU
:
5557 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5560 case OPC2_32_RR_SUBX
:
5561 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5563 case OPC2_32_RR_XOR_EQ
:
5564 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5565 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5567 case OPC2_32_RR_XOR_GE
:
5568 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5569 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5571 case OPC2_32_RR_XOR_GE_U
:
5572 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5573 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5575 case OPC2_32_RR_XOR_LT
:
5576 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5577 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5579 case OPC2_32_RR_XOR_LT_U
:
5580 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5581 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5583 case OPC2_32_RR_XOR_NE
:
5584 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5585 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5590 static void decode_rr_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
5596 r3
= MASK_OP_RR_D(ctx
->opcode
);
5597 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5598 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5600 temp
= tcg_temp_new();
5601 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5604 case OPC2_32_RR_AND
:
5605 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5607 case OPC2_32_RR_ANDN
:
5608 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5610 case OPC2_32_RR_CLO
:
5611 gen_helper_clo(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5613 case OPC2_32_RR_CLO_H
:
5614 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5616 case OPC2_32_RR_CLS
:
5617 gen_helper_cls(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5619 case OPC2_32_RR_CLS_H
:
5620 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5622 case OPC2_32_RR_CLZ
:
5623 gen_helper_clz(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5625 case OPC2_32_RR_CLZ_H
:
5626 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5628 case OPC2_32_RR_NAND
:
5629 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5631 case OPC2_32_RR_NOR
:
5632 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5635 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5637 case OPC2_32_RR_ORN
:
5638 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5641 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5643 case OPC2_32_RR_SH_H
:
5644 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5646 case OPC2_32_RR_SHA
:
5647 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5649 case OPC2_32_RR_SHA_H
:
5650 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5652 case OPC2_32_RR_SHAS
:
5653 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5655 case OPC2_32_RR_XNOR
:
5656 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5658 case OPC2_32_RR_XOR
:
5659 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5662 tcg_temp_free(temp
);
5665 static void decode_rr_address(CPUTriCoreState
*env
, DisasContext
*ctx
)
5671 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5672 r3
= MASK_OP_RR_D(ctx
->opcode
);
5673 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5674 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5675 n
= MASK_OP_RR_N(ctx
->opcode
);
5678 case OPC2_32_RR_ADD_A
:
5679 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
5681 case OPC2_32_RR_ADDSC_A
:
5682 temp
= tcg_temp_new();
5683 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
5684 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
5685 tcg_temp_free(temp
);
5687 case OPC2_32_RR_ADDSC_AT
:
5688 temp
= tcg_temp_new();
5689 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
5690 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
5691 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
5692 tcg_temp_free(temp
);
5694 case OPC2_32_RR_EQ_A
:
5695 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
5698 case OPC2_32_RR_EQZ
:
5699 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
5701 case OPC2_32_RR_GE_A
:
5702 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
5705 case OPC2_32_RR_LT_A
:
5706 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
5709 case OPC2_32_RR_MOV_A
:
5710 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
5712 case OPC2_32_RR_MOV_AA
:
5713 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
5715 case OPC2_32_RR_MOV_D
:
5716 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
5718 case OPC2_32_RR_NE_A
:
5719 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
5722 case OPC2_32_RR_NEZ_A
:
5723 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
5725 case OPC2_32_RR_SUB_A
:
5726 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
5731 static void decode_rr_idirect(CPUTriCoreState
*env
, DisasContext
*ctx
)
5736 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5737 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5741 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
5743 case OPC2_32_RR_JLI
:
5744 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
5745 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
5747 case OPC2_32_RR_CALLI
:
5748 gen_helper_1arg(call
, ctx
->next_pc
);
5749 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
5753 ctx
->bstate
= BS_BRANCH
;
5756 static void decode_rr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
5763 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5764 r3
= MASK_OP_RR_D(ctx
->opcode
);
5765 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5766 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5769 case OPC2_32_RR_BMERGE
:
5770 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5772 case OPC2_32_RR_BSPLIT
:
5773 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
5775 case OPC2_32_RR_DVINIT_B
:
5776 gen_dvinit_b(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
5779 case OPC2_32_RR_DVINIT_BU
:
5780 temp
= tcg_temp_new();
5781 temp2
= tcg_temp_new();
5783 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5784 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
5785 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
5786 tcg_gen_neg_tl(temp
, cpu_gpr_d
[r3
+1]);
5787 /* use cpu_PSW_AV to compare against 0 */
5788 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, cpu_gpr_d
[r3
+1], cpu_PSW_AV
,
5789 temp
, cpu_gpr_d
[r3
+1]);
5790 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
5791 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
5792 temp2
, cpu_gpr_d
[r2
]);
5793 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
5795 /* overflow = (D[b] == 0) */
5796 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
5798 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5800 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5802 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 8);
5803 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
5804 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp
);
5806 tcg_temp_free(temp
);
5807 tcg_temp_free(temp2
);
5809 case OPC2_32_RR_DVINIT_H
:
5810 gen_dvinit_h(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
5813 case OPC2_32_RR_DVINIT_HU
:
5814 temp
= tcg_temp_new();
5815 temp2
= tcg_temp_new();
5817 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5818 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
5819 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
5820 tcg_gen_neg_tl(temp
, cpu_gpr_d
[r3
+1]);
5821 /* use cpu_PSW_AV to compare against 0 */
5822 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, cpu_gpr_d
[r3
+1], cpu_PSW_AV
,
5823 temp
, cpu_gpr_d
[r3
+1]);
5824 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
5825 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
5826 temp2
, cpu_gpr_d
[r2
]);
5827 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
5829 /* overflow = (D[b] == 0) */
5830 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
5832 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5834 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5836 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
5837 tcg_gen_shri_tl(cpu_gpr_d
[r3
+1], temp
, 16);
5838 tcg_gen_shli_tl(cpu_gpr_d
[r3
], temp
, 16);
5839 tcg_temp_free(temp
);
5840 tcg_temp_free(temp2
);
5842 case OPC2_32_RR_DVINIT
:
5843 temp
= tcg_temp_new();
5844 temp2
= tcg_temp_new();
5845 /* overflow = ((D[b] == 0) ||
5846 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
5847 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
5848 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
5849 tcg_gen_and_tl(temp
, temp
, temp2
);
5850 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
5851 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
5852 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5854 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5856 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5858 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5859 /* sign extend to high reg */
5860 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
5861 tcg_temp_free(temp
);
5862 tcg_temp_free(temp2
);
5864 case OPC2_32_RR_DVINIT_U
:
5865 /* overflow = (D[b] == 0) */
5866 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
5867 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5869 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5871 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5873 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5874 /* zero extend to high reg*/
5875 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
5877 case OPC2_32_RR_PARITY
:
5878 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5880 case OPC2_32_RR_UNPACK
:
5881 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
5887 static void decode_rr1_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
5895 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
5896 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
5897 r3
= MASK_OP_RR1_D(ctx
->opcode
);
5898 n
= tcg_const_i32(MASK_OP_RR1_N(ctx
->opcode
));
5899 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
5902 case OPC2_32_RR1_MUL_H_32_LL
:
5903 temp64
= tcg_temp_new_i64();
5904 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5905 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5906 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5907 tcg_temp_free_i64(temp64
);
5909 case OPC2_32_RR1_MUL_H_32_LU
:
5910 temp64
= tcg_temp_new_i64();
5911 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5912 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5913 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5914 tcg_temp_free_i64(temp64
);
5916 case OPC2_32_RR1_MUL_H_32_UL
:
5917 temp64
= tcg_temp_new_i64();
5918 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5919 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5920 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5921 tcg_temp_free_i64(temp64
);
5923 case OPC2_32_RR1_MUL_H_32_UU
:
5924 temp64
= tcg_temp_new_i64();
5925 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5926 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5927 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5928 tcg_temp_free_i64(temp64
);
5930 case OPC2_32_RR1_MULM_H_64_LL
:
5931 temp64
= tcg_temp_new_i64();
5932 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5933 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5935 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5937 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5938 tcg_temp_free_i64(temp64
);
5940 case OPC2_32_RR1_MULM_H_64_LU
:
5941 temp64
= tcg_temp_new_i64();
5942 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5943 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5945 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5947 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5948 tcg_temp_free_i64(temp64
);
5950 case OPC2_32_RR1_MULM_H_64_UL
:
5951 temp64
= tcg_temp_new_i64();
5952 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5953 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5955 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5957 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5958 tcg_temp_free_i64(temp64
);
5960 case OPC2_32_RR1_MULM_H_64_UU
:
5961 temp64
= tcg_temp_new_i64();
5962 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5963 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5965 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5967 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5968 tcg_temp_free_i64(temp64
);
5971 case OPC2_32_RR1_MULR_H_16_LL
:
5972 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5973 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5975 case OPC2_32_RR1_MULR_H_16_LU
:
5976 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5977 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5979 case OPC2_32_RR1_MULR_H_16_UL
:
5980 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5981 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5983 case OPC2_32_RR1_MULR_H_16_UU
:
5984 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5985 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5991 static void decode_rr1_mulq(CPUTriCoreState
*env
, DisasContext
*ctx
)
5999 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6000 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6001 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6002 n
= MASK_OP_RR1_N(ctx
->opcode
);
6003 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6005 temp
= tcg_temp_new();
6006 temp2
= tcg_temp_new();
6009 case OPC2_32_RR1_MUL_Q_32
:
6010 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
6012 case OPC2_32_RR1_MUL_Q_64
:
6013 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6016 case OPC2_32_RR1_MUL_Q_32_L
:
6017 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6018 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6020 case OPC2_32_RR1_MUL_Q_64_L
:
6021 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6022 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6024 case OPC2_32_RR1_MUL_Q_32_U
:
6025 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6026 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6028 case OPC2_32_RR1_MUL_Q_64_U
:
6029 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6030 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6032 case OPC2_32_RR1_MUL_Q_32_LL
:
6033 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6034 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6035 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6037 case OPC2_32_RR1_MUL_Q_32_UU
:
6038 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6039 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6040 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6042 case OPC2_32_RR1_MULR_Q_32_L
:
6043 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6044 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6045 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6047 case OPC2_32_RR1_MULR_Q_32_U
:
6048 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6049 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6050 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6053 tcg_temp_free(temp
);
6054 tcg_temp_free(temp2
);
6058 static void decode_rr2_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
6063 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
6064 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
6065 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
6066 r3
= MASK_OP_RR2_D(ctx
->opcode
);
6068 case OPC2_32_RR2_MUL_32
:
6069 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6071 case OPC2_32_RR2_MUL_64
:
6072 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6075 case OPC2_32_RR2_MULS_32
:
6076 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6079 case OPC2_32_RR2_MUL_U_64
:
6080 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6083 case OPC2_32_RR2_MULS_U_32
:
6084 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6091 static void decode_rrpw_extract_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
6097 op2
= MASK_OP_RRPW_OP2(ctx
->opcode
);
6098 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
6099 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
6100 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
6101 pos
= MASK_OP_RRPW_POS(ctx
->opcode
);
6102 width
= MASK_OP_RRPW_WIDTH(ctx
->opcode
);
6105 case OPC2_32_RRPW_EXTR
:
6106 if (pos
+ width
<= 31) {
6107 /* optimize special cases */
6108 if ((pos
== 0) && (width
== 8)) {
6109 tcg_gen_ext8s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6110 } else if ((pos
== 0) && (width
== 16)) {
6111 tcg_gen_ext16s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6113 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 32 - pos
- width
);
6114 tcg_gen_sari_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 32 - width
);
6118 case OPC2_32_RRPW_EXTR_U
:
6120 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6122 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], pos
);
6123 tcg_gen_andi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], ~0u >> (32-width
));
6126 case OPC2_32_RRPW_IMASK
:
6127 if (pos
+ width
<= 31) {
6128 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], ((1u << width
) - 1) << pos
);
6129 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], pos
);
6132 case OPC2_32_RRPW_INSERT
:
6133 if (pos
+ width
<= 31) {
6134 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6142 static void decode_rrr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
6148 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6149 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6150 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6151 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6152 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6155 case OPC2_32_RRR_CADD
:
6156 gen_cond_add(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6157 cpu_gpr_d
[r4
], cpu_gpr_d
[r3
]);
6159 case OPC2_32_RRR_CADDN
:
6160 gen_cond_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6163 case OPC2_32_RRR_CSUB
:
6164 gen_cond_sub(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6167 case OPC2_32_RRR_CSUBN
:
6168 gen_cond_sub(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6171 case OPC2_32_RRR_SEL
:
6172 temp
= tcg_const_i32(0);
6173 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6174 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6175 tcg_temp_free(temp
);
6177 case OPC2_32_RRR_SELN
:
6178 temp
= tcg_const_i32(0);
6179 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6180 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6181 tcg_temp_free(temp
);
6186 static void decode_rrr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
6192 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6193 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6194 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6195 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6196 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6199 case OPC2_32_RRR_DVADJ
:
6200 GEN_HELPER_RRR(dvadj
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6201 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6203 case OPC2_32_RRR_DVSTEP
:
6204 GEN_HELPER_RRR(dvstep
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6205 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6207 case OPC2_32_RRR_DVSTEP_U
:
6208 GEN_HELPER_RRR(dvstep_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6209 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6211 case OPC2_32_RRR_IXMAX
:
6212 GEN_HELPER_RRR(ixmax
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6213 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6215 case OPC2_32_RRR_IXMAX_U
:
6216 GEN_HELPER_RRR(ixmax_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6217 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6219 case OPC2_32_RRR_IXMIN
:
6220 GEN_HELPER_RRR(ixmin
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6221 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6223 case OPC2_32_RRR_IXMIN_U
:
6224 GEN_HELPER_RRR(ixmin_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6225 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6227 case OPC2_32_RRR_PACK
:
6228 gen_helper_pack(cpu_gpr_d
[r4
], cpu_PSW_C
, cpu_gpr_d
[r3
],
6229 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6235 static void decode_rrr2_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
6238 uint32_t r1
, r2
, r3
, r4
;
6240 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6241 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6242 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6243 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6244 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6246 case OPC2_32_RRR2_MADD_32
:
6247 gen_madd32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6250 case OPC2_32_RRR2_MADD_64
:
6251 gen_madd64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6252 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6254 case OPC2_32_RRR2_MADDS_32
:
6255 gen_helper_madd32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6256 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6258 case OPC2_32_RRR2_MADDS_64
:
6259 gen_madds_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6260 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6262 case OPC2_32_RRR2_MADD_U_64
:
6263 gen_maddu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6264 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6266 case OPC2_32_RRR2_MADDS_U_32
:
6267 gen_helper_madd32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6268 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6270 case OPC2_32_RRR2_MADDS_U_64
:
6271 gen_maddsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6272 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6277 static void decode_rrr2_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
6280 uint32_t r1
, r2
, r3
, r4
;
6282 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6283 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6284 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6285 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6286 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6289 case OPC2_32_RRR2_MSUB_32
:
6290 gen_msub32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6293 case OPC2_32_RRR2_MSUB_64
:
6294 gen_msub64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6295 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6297 case OPC2_32_RRR2_MSUBS_32
:
6298 gen_helper_msub32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6299 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6301 case OPC2_32_RRR2_MSUBS_64
:
6302 gen_msubs_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6303 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6305 case OPC2_32_RRR2_MSUB_U_64
:
6306 gen_msubu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6307 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6309 case OPC2_32_RRR2_MSUBS_U_32
:
6310 gen_helper_msub32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6311 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6313 case OPC2_32_RRR2_MSUBS_U_64
:
6314 gen_msubsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6315 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6321 static void decode_rrr1_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
6324 uint32_t r1
, r2
, r3
, r4
, n
;
6326 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6327 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6328 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6329 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6330 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6331 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6334 case OPC2_32_RRR1_MADD_H_LL
:
6335 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6336 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6338 case OPC2_32_RRR1_MADD_H_LU
:
6339 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6340 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6342 case OPC2_32_RRR1_MADD_H_UL
:
6343 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6344 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6346 case OPC2_32_RRR1_MADD_H_UU
:
6347 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6348 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6350 case OPC2_32_RRR1_MADDS_H_LL
:
6351 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6352 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6354 case OPC2_32_RRR1_MADDS_H_LU
:
6355 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6356 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6358 case OPC2_32_RRR1_MADDS_H_UL
:
6359 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6360 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6362 case OPC2_32_RRR1_MADDS_H_UU
:
6363 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6364 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6366 case OPC2_32_RRR1_MADDM_H_LL
:
6367 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6368 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6370 case OPC2_32_RRR1_MADDM_H_LU
:
6371 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6372 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6374 case OPC2_32_RRR1_MADDM_H_UL
:
6375 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6376 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6378 case OPC2_32_RRR1_MADDM_H_UU
:
6379 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6380 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6382 case OPC2_32_RRR1_MADDMS_H_LL
:
6383 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6384 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6386 case OPC2_32_RRR1_MADDMS_H_LU
:
6387 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6388 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6390 case OPC2_32_RRR1_MADDMS_H_UL
:
6391 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6392 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6394 case OPC2_32_RRR1_MADDMS_H_UU
:
6395 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6396 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6398 case OPC2_32_RRR1_MADDR_H_LL
:
6399 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6400 cpu_gpr_d
[r2
], n
, MODE_LL
);
6402 case OPC2_32_RRR1_MADDR_H_LU
:
6403 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6404 cpu_gpr_d
[r2
], n
, MODE_LU
);
6406 case OPC2_32_RRR1_MADDR_H_UL
:
6407 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6408 cpu_gpr_d
[r2
], n
, MODE_UL
);
6410 case OPC2_32_RRR1_MADDR_H_UU
:
6411 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6412 cpu_gpr_d
[r2
], n
, MODE_UU
);
6414 case OPC2_32_RRR1_MADDRS_H_LL
:
6415 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6416 cpu_gpr_d
[r2
], n
, MODE_LL
);
6418 case OPC2_32_RRR1_MADDRS_H_LU
:
6419 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6420 cpu_gpr_d
[r2
], n
, MODE_LU
);
6422 case OPC2_32_RRR1_MADDRS_H_UL
:
6423 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6424 cpu_gpr_d
[r2
], n
, MODE_UL
);
6426 case OPC2_32_RRR1_MADDRS_H_UU
:
6427 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6428 cpu_gpr_d
[r2
], n
, MODE_UU
);
6433 static void decode_rrr1_maddq_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
6436 uint32_t r1
, r2
, r3
, r4
, n
;
6439 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6440 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6441 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6442 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6443 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6444 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6446 temp
= tcg_const_i32(n
);
6447 temp2
= tcg_temp_new();
6450 case OPC2_32_RRR1_MADD_Q_32
:
6451 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6452 cpu_gpr_d
[r2
], n
, 32, env
);
6454 case OPC2_32_RRR1_MADD_Q_64
:
6455 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6456 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6459 case OPC2_32_RRR1_MADD_Q_32_L
:
6460 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6461 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6464 case OPC2_32_RRR1_MADD_Q_64_L
:
6465 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6466 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6467 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6470 case OPC2_32_RRR1_MADD_Q_32_U
:
6471 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6472 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6475 case OPC2_32_RRR1_MADD_Q_64_U
:
6476 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6477 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6478 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6481 case OPC2_32_RRR1_MADD_Q_32_LL
:
6482 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6483 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6484 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6486 case OPC2_32_RRR1_MADD_Q_64_LL
:
6487 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6488 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6489 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6490 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6492 case OPC2_32_RRR1_MADD_Q_32_UU
:
6493 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6494 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6495 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6497 case OPC2_32_RRR1_MADD_Q_64_UU
:
6498 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6499 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6500 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6501 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6503 case OPC2_32_RRR1_MADDS_Q_32
:
6504 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6505 cpu_gpr_d
[r2
], n
, 32);
6507 case OPC2_32_RRR1_MADDS_Q_64
:
6508 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6509 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6512 case OPC2_32_RRR1_MADDS_Q_32_L
:
6513 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6514 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6517 case OPC2_32_RRR1_MADDS_Q_64_L
:
6518 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6519 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6520 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6523 case OPC2_32_RRR1_MADDS_Q_32_U
:
6524 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6525 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6528 case OPC2_32_RRR1_MADDS_Q_64_U
:
6529 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6530 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6531 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6534 case OPC2_32_RRR1_MADDS_Q_32_LL
:
6535 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6536 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6537 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6539 case OPC2_32_RRR1_MADDS_Q_64_LL
:
6540 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6541 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6542 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6543 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6545 case OPC2_32_RRR1_MADDS_Q_32_UU
:
6546 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6547 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6548 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6550 case OPC2_32_RRR1_MADDS_Q_64_UU
:
6551 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6552 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6553 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6554 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6556 case OPC2_32_RRR1_MADDR_H_64_UL
:
6557 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
6558 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
6560 case OPC2_32_RRR1_MADDRS_H_64_UL
:
6561 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
6562 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
6564 case OPC2_32_RRR1_MADDR_Q_32_LL
:
6565 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6566 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6567 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6569 case OPC2_32_RRR1_MADDR_Q_32_UU
:
6570 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6571 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6572 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6574 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
6575 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6576 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6577 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6579 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
6580 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6581 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6582 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6585 tcg_temp_free(temp
);
6586 tcg_temp_free(temp2
);
6589 static void decode_rrr1_maddsu_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
6592 uint32_t r1
, r2
, r3
, r4
, n
;
6594 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6595 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6596 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6597 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6598 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6599 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6602 case OPC2_32_RRR1_MADDSU_H_32_LL
:
6603 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6604 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6606 case OPC2_32_RRR1_MADDSU_H_32_LU
:
6607 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6608 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6610 case OPC2_32_RRR1_MADDSU_H_32_UL
:
6611 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6612 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6614 case OPC2_32_RRR1_MADDSU_H_32_UU
:
6615 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6616 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6618 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
6619 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6620 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6623 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
6624 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6625 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6628 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
6629 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6630 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6633 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
6634 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6635 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6638 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
6639 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6640 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6643 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
6644 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6645 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6648 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
6649 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6650 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6653 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
6654 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6655 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6658 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
6659 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6660 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6663 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
6664 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6665 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6668 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
6669 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6670 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6673 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
6674 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6675 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6678 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
6679 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6680 cpu_gpr_d
[r2
], n
, MODE_LL
);
6682 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
6683 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6684 cpu_gpr_d
[r2
], n
, MODE_LU
);
6686 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
6687 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6688 cpu_gpr_d
[r2
], n
, MODE_UL
);
6690 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
6691 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6692 cpu_gpr_d
[r2
], n
, MODE_UU
);
6694 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
6695 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6696 cpu_gpr_d
[r2
], n
, MODE_LL
);
6698 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
6699 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6700 cpu_gpr_d
[r2
], n
, MODE_LU
);
6702 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
6703 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6704 cpu_gpr_d
[r2
], n
, MODE_UL
);
6706 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
6707 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6708 cpu_gpr_d
[r2
], n
, MODE_UU
);
6713 static void decode_rrr1_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
6716 uint32_t r1
, r2
, r3
, r4
, n
;
6718 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6719 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6720 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6721 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6722 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6723 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6726 case OPC2_32_RRR1_MSUB_H_LL
:
6727 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6728 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6730 case OPC2_32_RRR1_MSUB_H_LU
:
6731 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6732 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6734 case OPC2_32_RRR1_MSUB_H_UL
:
6735 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6736 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6738 case OPC2_32_RRR1_MSUB_H_UU
:
6739 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6740 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6742 case OPC2_32_RRR1_MSUBS_H_LL
:
6743 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6744 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6746 case OPC2_32_RRR1_MSUBS_H_LU
:
6747 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6748 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6750 case OPC2_32_RRR1_MSUBS_H_UL
:
6751 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6752 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6754 case OPC2_32_RRR1_MSUBS_H_UU
:
6755 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6756 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6758 case OPC2_32_RRR1_MSUBM_H_LL
:
6759 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6760 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6762 case OPC2_32_RRR1_MSUBM_H_LU
:
6763 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6764 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6766 case OPC2_32_RRR1_MSUBM_H_UL
:
6767 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6768 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6770 case OPC2_32_RRR1_MSUBM_H_UU
:
6771 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6772 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6774 case OPC2_32_RRR1_MSUBMS_H_LL
:
6775 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6776 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6778 case OPC2_32_RRR1_MSUBMS_H_LU
:
6779 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6780 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6782 case OPC2_32_RRR1_MSUBMS_H_UL
:
6783 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6784 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6786 case OPC2_32_RRR1_MSUBMS_H_UU
:
6787 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6788 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6790 case OPC2_32_RRR1_MSUBR_H_LL
:
6791 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6792 cpu_gpr_d
[r2
], n
, MODE_LL
);
6794 case OPC2_32_RRR1_MSUBR_H_LU
:
6795 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6796 cpu_gpr_d
[r2
], n
, MODE_LU
);
6798 case OPC2_32_RRR1_MSUBR_H_UL
:
6799 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6800 cpu_gpr_d
[r2
], n
, MODE_UL
);
6802 case OPC2_32_RRR1_MSUBR_H_UU
:
6803 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6804 cpu_gpr_d
[r2
], n
, MODE_UU
);
6806 case OPC2_32_RRR1_MSUBRS_H_LL
:
6807 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6808 cpu_gpr_d
[r2
], n
, MODE_LL
);
6810 case OPC2_32_RRR1_MSUBRS_H_LU
:
6811 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6812 cpu_gpr_d
[r2
], n
, MODE_LU
);
6814 case OPC2_32_RRR1_MSUBRS_H_UL
:
6815 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6816 cpu_gpr_d
[r2
], n
, MODE_UL
);
6818 case OPC2_32_RRR1_MSUBRS_H_UU
:
6819 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6820 cpu_gpr_d
[r2
], n
, MODE_UU
);
6825 static void decode_32Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
6829 int32_t address
, const16
;
6832 TCGv temp
, temp2
, temp3
;
6834 op1
= MASK_OP_MAJOR(ctx
->opcode
);
6836 /* handle JNZ.T opcode only being 7 bit long */
6837 if (unlikely((op1
& 0x7f) == OPCM_32_BRN_JTT
)) {
6838 op1
= OPCM_32_BRN_JTT
;
6843 case OPCM_32_ABS_LDW
:
6844 decode_abs_ldw(env
, ctx
);
6846 case OPCM_32_ABS_LDB
:
6847 decode_abs_ldb(env
, ctx
);
6849 case OPCM_32_ABS_LDMST_SWAP
:
6850 decode_abs_ldst_swap(env
, ctx
);
6852 case OPCM_32_ABS_LDST_CONTEXT
:
6853 decode_abs_ldst_context(env
, ctx
);
6855 case OPCM_32_ABS_STORE
:
6856 decode_abs_store(env
, ctx
);
6858 case OPCM_32_ABS_STOREB_H
:
6859 decode_abs_storeb_h(env
, ctx
);
6861 case OPC1_32_ABS_STOREQ
:
6862 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
6863 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
6864 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
6865 temp2
= tcg_temp_new();
6867 tcg_gen_shri_tl(temp2
, cpu_gpr_d
[r1
], 16);
6868 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_LEUW
);
6870 tcg_temp_free(temp2
);
6871 tcg_temp_free(temp
);
6873 case OPC1_32_ABS_LD_Q
:
6874 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
6875 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
6876 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
6878 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
6879 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
6881 tcg_temp_free(temp
);
6883 case OPC1_32_ABS_LEA
:
6884 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
6885 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
6886 tcg_gen_movi_tl(cpu_gpr_a
[r1
], EA_ABS_FORMAT(address
));
6889 case OPC1_32_ABSB_ST_T
:
6890 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
6891 b
= MASK_OP_ABSB_B(ctx
->opcode
);
6892 bpos
= MASK_OP_ABSB_BPOS(ctx
->opcode
);
6894 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
6895 temp2
= tcg_temp_new();
6897 tcg_gen_qemu_ld_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
6898 tcg_gen_andi_tl(temp2
, temp2
, ~(0x1u
<< bpos
));
6899 tcg_gen_ori_tl(temp2
, temp2
, (b
<< bpos
));
6900 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
6902 tcg_temp_free(temp
);
6903 tcg_temp_free(temp2
);
6906 case OPC1_32_B_CALL
:
6907 case OPC1_32_B_CALLA
:
6912 address
= MASK_OP_B_DISP24_SEXT(ctx
->opcode
);
6913 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
6916 case OPCM_32_BIT_ANDACC
:
6917 decode_bit_andacc(env
, ctx
);
6919 case OPCM_32_BIT_LOGICAL_T1
:
6920 decode_bit_logical_t(env
, ctx
);
6922 case OPCM_32_BIT_INSERT
:
6923 decode_bit_insert(env
, ctx
);
6925 case OPCM_32_BIT_LOGICAL_T2
:
6926 decode_bit_logical_t2(env
, ctx
);
6928 case OPCM_32_BIT_ORAND
:
6929 decode_bit_orand(env
, ctx
);
6931 case OPCM_32_BIT_SH_LOGIC1
:
6932 decode_bit_sh_logic1(env
, ctx
);
6934 case OPCM_32_BIT_SH_LOGIC2
:
6935 decode_bit_sh_logic2(env
, ctx
);
6938 case OPCM_32_BO_ADDRMODE_POST_PRE_BASE
:
6939 decode_bo_addrmode_post_pre_base(env
, ctx
);
6941 case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR
:
6942 decode_bo_addrmode_bitreverse_circular(env
, ctx
);
6944 case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE
:
6945 decode_bo_addrmode_ld_post_pre_base(env
, ctx
);
6947 case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR
:
6948 decode_bo_addrmode_ld_bitreverse_circular(env
, ctx
);
6950 case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE
:
6951 decode_bo_addrmode_stctx_post_pre_base(env
, ctx
);
6953 case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR
:
6954 decode_bo_addrmode_ldmst_bitreverse_circular(env
, ctx
);
6957 case OPC1_32_BOL_LD_A_LONGOFF
:
6958 case OPC1_32_BOL_LD_W_LONGOFF
:
6959 case OPC1_32_BOL_LEA_LONGOFF
:
6960 case OPC1_32_BOL_ST_W_LONGOFF
:
6961 case OPC1_32_BOL_ST_A_LONGOFF
:
6962 case OPC1_32_BOL_LD_B_LONGOFF
:
6963 case OPC1_32_BOL_LD_BU_LONGOFF
:
6964 case OPC1_32_BOL_LD_H_LONGOFF
:
6965 case OPC1_32_BOL_LD_HU_LONGOFF
:
6966 case OPC1_32_BOL_ST_B_LONGOFF
:
6967 case OPC1_32_BOL_ST_H_LONGOFF
:
6968 decode_bol_opc(env
, ctx
, op1
);
6971 case OPCM_32_BRC_EQ_NEQ
:
6972 case OPCM_32_BRC_GE
:
6973 case OPCM_32_BRC_JLT
:
6974 case OPCM_32_BRC_JNE
:
6975 const4
= MASK_OP_BRC_CONST4_SEXT(ctx
->opcode
);
6976 address
= MASK_OP_BRC_DISP15_SEXT(ctx
->opcode
);
6977 r1
= MASK_OP_BRC_S1(ctx
->opcode
);
6978 gen_compute_branch(ctx
, op1
, r1
, 0, const4
, address
);
6981 case OPCM_32_BRN_JTT
:
6982 address
= MASK_OP_BRN_DISP15_SEXT(ctx
->opcode
);
6983 r1
= MASK_OP_BRN_S1(ctx
->opcode
);
6984 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
6987 case OPCM_32_BRR_EQ_NEQ
:
6988 case OPCM_32_BRR_ADDR_EQ_NEQ
:
6989 case OPCM_32_BRR_GE
:
6990 case OPCM_32_BRR_JLT
:
6991 case OPCM_32_BRR_JNE
:
6992 case OPCM_32_BRR_JNZ
:
6993 case OPCM_32_BRR_LOOP
:
6994 address
= MASK_OP_BRR_DISP15_SEXT(ctx
->opcode
);
6995 r2
= MASK_OP_BRR_S2(ctx
->opcode
);
6996 r1
= MASK_OP_BRR_S1(ctx
->opcode
);
6997 gen_compute_branch(ctx
, op1
, r1
, r2
, 0, address
);
7000 case OPCM_32_RC_LOGICAL_SHIFT
:
7001 decode_rc_logical_shift(env
, ctx
);
7003 case OPCM_32_RC_ACCUMULATOR
:
7004 decode_rc_accumulator(env
, ctx
);
7006 case OPCM_32_RC_SERVICEROUTINE
:
7007 decode_rc_serviceroutine(env
, ctx
);
7009 case OPCM_32_RC_MUL
:
7010 decode_rc_mul(env
, ctx
);
7013 case OPCM_32_RCPW_MASK_INSERT
:
7014 decode_rcpw_insert(env
, ctx
);
7017 case OPC1_32_RCRR_INSERT
:
7018 r1
= MASK_OP_RCRR_S1(ctx
->opcode
);
7019 r2
= MASK_OP_RCRR_S3(ctx
->opcode
);
7020 r3
= MASK_OP_RCRR_D(ctx
->opcode
);
7021 const16
= MASK_OP_RCRR_CONST4(ctx
->opcode
);
7022 temp
= tcg_const_i32(const16
);
7023 temp2
= tcg_temp_new(); /* width*/
7024 temp3
= tcg_temp_new(); /* pos */
7026 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
+1], 0x1f);
7027 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
7029 gen_insert(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, temp2
, temp3
);
7031 tcg_temp_free(temp
);
7032 tcg_temp_free(temp2
);
7033 tcg_temp_free(temp3
);
7036 case OPCM_32_RCRW_MASK_INSERT
:
7037 decode_rcrw_insert(env
, ctx
);
7040 case OPCM_32_RCR_COND_SELECT
:
7041 decode_rcr_cond_select(env
, ctx
);
7043 case OPCM_32_RCR_MADD
:
7044 decode_rcr_madd(env
, ctx
);
7046 case OPCM_32_RCR_MSUB
:
7047 decode_rcr_msub(env
, ctx
);
7050 case OPC1_32_RLC_ADDI
:
7051 case OPC1_32_RLC_ADDIH
:
7052 case OPC1_32_RLC_ADDIH_A
:
7053 case OPC1_32_RLC_MFCR
:
7054 case OPC1_32_RLC_MOV
:
7055 case OPC1_32_RLC_MOV_64
:
7056 case OPC1_32_RLC_MOV_U
:
7057 case OPC1_32_RLC_MOV_H
:
7058 case OPC1_32_RLC_MOVH_A
:
7059 case OPC1_32_RLC_MTCR
:
7060 decode_rlc_opc(env
, ctx
, op1
);
7063 case OPCM_32_RR_ACCUMULATOR
:
7064 decode_rr_accumulator(env
, ctx
);
7066 case OPCM_32_RR_LOGICAL_SHIFT
:
7067 decode_rr_logical_shift(env
, ctx
);
7069 case OPCM_32_RR_ADDRESS
:
7070 decode_rr_address(env
, ctx
);
7072 case OPCM_32_RR_IDIRECT
:
7073 decode_rr_idirect(env
, ctx
);
7075 case OPCM_32_RR_DIVIDE
:
7076 decode_rr_divide(env
, ctx
);
7079 case OPCM_32_RR1_MUL
:
7080 decode_rr1_mul(env
, ctx
);
7082 case OPCM_32_RR1_MULQ
:
7083 decode_rr1_mulq(env
, ctx
);
7086 case OPCM_32_RR2_MUL
:
7087 decode_rr2_mul(env
, ctx
);
7090 case OPCM_32_RRPW_EXTRACT_INSERT
:
7091 decode_rrpw_extract_insert(env
, ctx
);
7093 case OPC1_32_RRPW_DEXTR
:
7094 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
7095 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
7096 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
7097 const16
= MASK_OP_RRPW_POS(ctx
->opcode
);
7099 tcg_gen_rotli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], const16
);
7101 temp
= tcg_temp_new();
7102 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], const16
);
7103 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 32 - const16
);
7104 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
7105 tcg_temp_free(temp
);
7109 case OPCM_32_RRR_COND_SELECT
:
7110 decode_rrr_cond_select(env
, ctx
);
7112 case OPCM_32_RRR_DIVIDE
:
7113 decode_rrr_divide(env
, ctx
);
7115 case OPCM_32_RRR2_MADD
:
7116 decode_rrr2_madd(env
, ctx
);
7118 case OPCM_32_RRR2_MSUB
:
7119 decode_rrr2_msub(env
, ctx
);
7122 case OPCM_32_RRR1_MADD
:
7123 decode_rrr1_madd(env
, ctx
);
7125 case OPCM_32_RRR1_MADDQ_H
:
7126 decode_rrr1_maddq_h(env
, ctx
);
7128 case OPCM_32_RRR1_MADDSU_H
:
7129 decode_rrr1_maddsu_h(env
, ctx
);
7131 case OPCM_32_RRR1_MSUB_H
:
7132 decode_rrr1_msub(env
, ctx
);
7137 static void decode_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int *is_branch
)
7139 /* 16-Bit Instruction */
7140 if ((ctx
->opcode
& 0x1) == 0) {
7141 ctx
->next_pc
= ctx
->pc
+ 2;
7142 decode_16Bit_opc(env
, ctx
);
7143 /* 32-Bit Instruction */
7145 ctx
->next_pc
= ctx
->pc
+ 4;
7146 decode_32Bit_opc(env
, ctx
);
7151 gen_intermediate_code_internal(TriCoreCPU
*cpu
, struct TranslationBlock
*tb
,
7154 CPUState
*cs
= CPU(cpu
);
7155 CPUTriCoreState
*env
= &cpu
->env
;
7157 target_ulong pc_start
;
7161 qemu_log("search pc %d\n", search_pc
);
7169 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
7170 ctx
.bstate
= BS_NONE
;
7171 ctx
.mem_idx
= cpu_mmu_index(env
);
7173 tcg_clear_temp_count();
7175 while (ctx
.bstate
== BS_NONE
) {
7176 ctx
.opcode
= cpu_ldl_code(env
, ctx
.pc
);
7177 decode_opc(env
, &ctx
, 0);
7181 if (tcg_op_buf_full()) {
7182 gen_save_pc(ctx
.next_pc
);
7187 gen_save_pc(ctx
.next_pc
);
7191 ctx
.pc
= ctx
.next_pc
;
7194 gen_tb_end(tb
, num_insns
);
7196 printf("done_generating search pc\n");
7198 tb
->size
= ctx
.pc
- pc_start
;
7199 tb
->icount
= num_insns
;
7201 if (tcg_check_temp_count()) {
7202 printf("LEAK at %08x\n", env
->PC
);
7206 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
7207 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
7208 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
7215 gen_intermediate_code(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
7217 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, false);
7221 gen_intermediate_code_pc(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
7223 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, true);
7227 restore_state_to_opc(CPUTriCoreState
*env
, TranslationBlock
*tb
, int pc_pos
)
7229 env
->PC
= tcg_ctx
.gen_opc_pc
[pc_pos
];
7237 void cpu_state_reset(CPUTriCoreState
*env
)
7239 /* Reset Regs to Default Value */
7243 static void tricore_tcg_init_csfr(void)
7245 cpu_PCXI
= tcg_global_mem_new(TCG_AREG0
,
7246 offsetof(CPUTriCoreState
, PCXI
), "PCXI");
7247 cpu_PSW
= tcg_global_mem_new(TCG_AREG0
,
7248 offsetof(CPUTriCoreState
, PSW
), "PSW");
7249 cpu_PC
= tcg_global_mem_new(TCG_AREG0
,
7250 offsetof(CPUTriCoreState
, PC
), "PC");
7251 cpu_ICR
= tcg_global_mem_new(TCG_AREG0
,
7252 offsetof(CPUTriCoreState
, ICR
), "ICR");
7255 void tricore_tcg_init(void)
7262 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
7264 for (i
= 0 ; i
< 16 ; i
++) {
7265 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
7266 offsetof(CPUTriCoreState
, gpr_a
[i
]),
7269 for (i
= 0 ; i
< 16 ; i
++) {
7270 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
7271 offsetof(CPUTriCoreState
, gpr_d
[i
]),
7274 tricore_tcg_init_csfr();
7275 /* init PSW flag cache */
7276 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
7277 offsetof(CPUTriCoreState
, PSW_USB_C
),
7279 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
7280 offsetof(CPUTriCoreState
, PSW_USB_V
),
7282 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
7283 offsetof(CPUTriCoreState
, PSW_USB_SV
),
7285 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
7286 offsetof(CPUTriCoreState
, PSW_USB_AV
),
7288 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
7289 offsetof(CPUTriCoreState
, PSW_USB_SAV
),