/*
 * TriCore emulation for qemu: main translation routines.
 *
 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "tricore-opcodes.h"
static TCGv cpu_gpr_a[16];
static TCGv cpu_gpr_d[16];

static TCGv cpu_PSW_C;
static TCGv cpu_PSW_V;
static TCGv cpu_PSW_SV;
static TCGv cpu_PSW_AV;
static TCGv cpu_PSW_SAV;

static TCGv_ptr cpu_env;

#include "exec/gen-icount.h"
static const char *regnames_a[] = {
    "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "a8", "a9", "sp", "a11",
    "a12", "a13", "a14", "a15",
};

static const char *regnames_d[] = {
    "d0", "d1", "d2", "d3", "d4", "d5",
    "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15",
};
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc, saved_pc, next_pc;
    int singlestep_enabled;
    /* Routine used to access memory */
    int mem_idx;
    uint32_t hflags, saved_hflags;
} DisasContext;
void tricore_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    TriCoreCPU *cpu = TRICORE_CPU(cs);
    CPUTriCoreState *env = &cpu->env;
    uint32_t psw;
    int i;

    psw = psw_read(env);

    cpu_fprintf(f, "PC: " TARGET_FMT_lx, env->PC);
    cpu_fprintf(f, " PSW: " TARGET_FMT_lx, psw);
    cpu_fprintf(f, " ICR: " TARGET_FMT_lx, env->ICR);
    cpu_fprintf(f, "\nPCXI: " TARGET_FMT_lx, env->PCXI);
    cpu_fprintf(f, " FCX: " TARGET_FMT_lx, env->FCX);
    cpu_fprintf(f, " LCX: " TARGET_FMT_lx, env->LCX);

    for (i = 0; i < 16; ++i) {
        if ((i & 3) == 0) {
            cpu_fprintf(f, "\nGPR A%02d:", i);
        }
        cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_a[i]);
    }
    for (i = 0; i < 16; ++i) {
        if ((i & 3) == 0) {
            cpu_fprintf(f, "\nGPR D%02d:", i);
        }
        cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_d[i]);
    }
    cpu_fprintf(f, "\n");
}
/*
 * Functions to generate micro-ops
 */

/* Macros for generating helpers */
#define gen_helper_1arg(name, arg) do {       \
    TCGv_i32 helper_tmp = tcg_const_i32(arg); \
    gen_helper_##name(cpu_env, helper_tmp);   \
    tcg_temp_free_i32(helper_tmp);            \
    } while (0)
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {       \
    TCGv arg00 = tcg_temp_new();                           \
    TCGv arg01 = tcg_temp_new();                           \
    TCGv arg11 = tcg_temp_new();                           \
    tcg_gen_sari_tl(arg00, arg0, 16);                      \
    tcg_gen_ext16s_tl(arg01, arg0);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
    tcg_temp_free(arg00);                                  \
    tcg_temp_free(arg01);                                  \
    tcg_temp_free(arg11);                                  \
    } while (0)

#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {       \
    TCGv arg00 = tcg_temp_new();                           \
    TCGv arg01 = tcg_temp_new();                           \
    TCGv arg10 = tcg_temp_new();                           \
    TCGv arg11 = tcg_temp_new();                           \
    tcg_gen_sari_tl(arg00, arg0, 16);                      \
    tcg_gen_ext16s_tl(arg01, arg0);                        \
    tcg_gen_sari_tl(arg11, arg1, 16);                      \
    tcg_gen_ext16s_tl(arg10, arg1);                        \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
    tcg_temp_free(arg00);                                  \
    tcg_temp_free(arg01);                                  \
    tcg_temp_free(arg10);                                  \
    tcg_temp_free(arg11);                                  \
    } while (0)

#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {       \
    TCGv arg00 = tcg_temp_new();                           \
    TCGv arg01 = tcg_temp_new();                           \
    TCGv arg10 = tcg_temp_new();                           \
    TCGv arg11 = tcg_temp_new();                           \
    tcg_gen_sari_tl(arg00, arg0, 16);                      \
    tcg_gen_ext16s_tl(arg01, arg0);                        \
    tcg_gen_sari_tl(arg10, arg1, 16);                      \
    tcg_gen_ext16s_tl(arg11, arg1);                        \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
    tcg_temp_free(arg00);                                  \
    tcg_temp_free(arg01);                                  \
    tcg_temp_free(arg10);                                  \
    tcg_temp_free(arg11);                                  \
    } while (0)

#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {       \
    TCGv arg00 = tcg_temp_new();                           \
    TCGv arg01 = tcg_temp_new();                           \
    TCGv arg11 = tcg_temp_new();                           \
    tcg_gen_sari_tl(arg01, arg0, 16);                      \
    tcg_gen_ext16s_tl(arg00, arg0);                        \
    tcg_gen_sari_tl(arg11, arg1, 16);                      \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
    tcg_temp_free(arg00);                                  \
    tcg_temp_free(arg01);                                  \
    tcg_temp_free(arg11);                                  \
    } while (0)

#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {  \
    TCGv_i64 ret = tcg_temp_new_i64();                     \
    TCGv_i64 arg1 = tcg_temp_new_i64();                    \
                                                           \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                \
    gen_helper_##name(ret, arg1, arg2);                    \
    tcg_gen_extr_i64_i32(rl, rh, ret);                     \
                                                           \
    tcg_temp_free_i64(ret);                                \
    tcg_temp_free_i64(arg1);                               \
    } while (0)

#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do {       \
    TCGv_i64 ret = tcg_temp_new_i64();                     \
                                                           \
    gen_helper_##name(ret, cpu_env, arg1, arg2);           \
    tcg_gen_extr_i64_i32(rl, rh, ret);                     \
                                                           \
    tcg_temp_free_i64(ret);                                \
    } while (0)
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
/* Functions for load/save to/from memory */

static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, con);
    tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
    tcg_temp_free(temp);
}

static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, con);
    tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
    tcg_temp_free(temp);
}
static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    tcg_gen_concat_i32_i64(temp, rl, rh);
    tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);

    tcg_temp_free_i64(temp);
}

static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, base, con);
    gen_st_2regs_64(rh, rl, temp, ctx);
    tcg_temp_free(temp);
}

static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
    /* write back to two 32 bit regs */
    tcg_gen_extr_i64_i32(rl, rh, temp);

    tcg_temp_free_i64(temp);
}

static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, base, con);
    gen_ld_2regs_64(rh, rl, temp, ctx);
    tcg_temp_free(temp);
}
static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, off);
    tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
    tcg_gen_mov_tl(r2, temp);
    tcg_temp_free(temp);
}

static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, off);
    tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
    tcg_gen_mov_tl(r2, temp);
    tcg_temp_free(temp);
}
/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    /* temp = (M(EA, word) */
    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    /* temp = temp & ~E[a][63:32]) */
    tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
    /* temp2 = (E[a][31:0] & E[a][63:32]); */
    tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
    /* temp = temp | temp2; */
    tcg_gen_or_tl(temp, temp, temp2);
    /* M(EA, word) = temp; */
    tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
/* tmp = M(EA, word);
   M(EA, word) = D[a];
   D[a] = tmp[31:0]; */
static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);

    tcg_temp_free(temp);
}
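
/* Compare-and-swap: the word at EA is replaced by D[a] only if it equals
   D[a+1]; the previous memory value is always returned in D[a]. */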
static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_movcond_tl(TCG_COND_EQ, temp2, cpu_gpr_d[reg+1], temp,
                       cpu_gpr_d[reg], temp);
    tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
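
/* Masked swap: only the bits selected by the mask in D[a+1] are taken from
   D[a] when writing back to memory; D[a] receives the complete old memory
   word. */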
static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();

    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_and_tl(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg+1]);
    tcg_gen_andc_tl(temp3, temp, cpu_gpr_d[reg+1]);
    tcg_gen_or_tl(temp2, temp2, temp3);
    tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
}
/* We generate loads and store to core special function register (csfr) through
   the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
   macros R, A and E, which allow read-only, all and endinit protected access.
   These macros also specify in which ISA version the csfr was introduced. */
#define R(ADDRESS, REG, FEATURE)                                         \
    case ADDRESS:                                                        \
        if (tricore_feature(env, FEATURE)) {                             \
            tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
        }                                                                \
        break;
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
static inline void gen_mfcr(CPUTriCoreState *env, TCGv ret, int32_t offset)
{
    /* since we're caching PSW make this a special case */
    if (offset == 0xfe04) {
        gen_helper_psw_read(ret, cpu_env);
    } else {
        switch (offset) {
#include "csfr.def"
        }
    }
}
#undef R
#undef A
#undef E

#define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
                                    since no exception occurs */
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)                \
    case ADDRESS:                                                        \
        if (tricore_feature(env, FEATURE)) {                             \
            tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG));  \
        }                                                                \
        break;
/* Endinit protected registers
   TODO: Since the endinit bit is in a register of a not yet implemented
   watchdog device, we handle endinit protected registers like
   all-access registers for now. */
#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
static inline void gen_mtcr(CPUTriCoreState *env, DisasContext *ctx, TCGv r1,
                            int32_t offset)
{
    if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
        /* since we're caching PSW make this a special case */
        if (offset == 0xfe04) {
            gen_helper_psw_write(cpu_env, r1);
        } else {
            switch (offset) {
#include "csfr.def"
            }
        }
    } else {
        /* generate privilege trap */
    }
}
/* Functions for arithmetic instructions  */
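
/* Most helpers below share one flag pattern: the signed overflow bit V is
   taken from (result ^ r1) & ~(r1 ^ r2) for additions (operands of equal
   sign producing a result of opposite sign), the advance overflow bit AV is
   bit 31 ^ bit 30 of the result (result + result, xor result), and SV/SAV
   accumulate V/AV as sticky bits. */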
static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    /* Addition and set V/SV bits */
    tcg_gen_add_tl(result, r1, r2);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(t0);
}
static inline void
gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();

    tcg_gen_add_i64(result, r1, r2);
    /* calc v bit */
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_andc_i64(t1, t1, t0);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits */
    tcg_gen_extrh_i64_i32(temp, result);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free_i64(result);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
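
/* Apply op1 to the low words and op2 to the high words of a 64-bit register
   pair; V is combined from both halves and AV from both result words. */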
static inline void
gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, void(*op1)(TCGv, TCGv, TCGv),
               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv temp4 = tcg_temp_new();

    (*op1)(temp, r1_low, r2);
    /* calc V0 bit */
    tcg_gen_xor_tl(temp2, temp, r1_low);
    tcg_gen_xor_tl(temp3, r1_low, r2);
    if (op1 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(temp2, temp2, temp3);
    } else {
        tcg_gen_and_tl(temp2, temp2, temp3);
    }
    (*op2)(temp3, r1_high, r3);
    /* calc V1 bit */
    tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high);
    tcg_gen_xor_tl(temp4, r1_high, r3);
    if (op2 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    } else {
        tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    }
    /* combine V0/V1 bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2);
    /* calc sv bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* write result */
    tcg_gen_mov_tl(ret_low, temp);
    tcg_gen_mov_tl(ret_high, temp3);
    /* calc AV bit */
    tcg_gen_add_tl(temp, ret_low, ret_low);
    tcg_gen_xor_tl(temp, temp, ret_low);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free(temp4);
}
/* ret = r2 + (r1 * r3); */
static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t1, t2, t1);

    tcg_gen_extrl_i64_i32(ret, t1);
    /* calc V: t1 > 0x7fffffff */
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* t1 < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_madd32_d(ret, r1, r2, temp);
    tcg_temp_free(temp);
}
static inline void
gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();

    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the add can overflow */
    tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(t3);
    tcg_temp_free(t4);
}
static inline void
gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t2, t2, t1);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, t2);
    /* only the add overflows, if t2 < t1
       calc V bit */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
static inline void
gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}
static inline void
gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}
static inline void
gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
           TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_add_tl, tcg_gen_add_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_sub_tl, tcg_gen_add_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);

    gen_add64_d(temp64_2, temp64_3, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);
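
/* The saturating *s_h variants below compute each half with gen_adds or
   gen_subs and then OR the per-half V and AV bits together. */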
static inline void
gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_adds(ret_low, r1_low, temp);
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_adds(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);
static inline void
gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_subs(ret_low, r1_low, temp);
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_adds(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);

    gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
static inline void
gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_add64_d(temp64_3, temp64_2, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
static inline void
gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
static inline void
gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
              uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_addsur_h(ret, cpu_env, temp64, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
               uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_addsur_h_ssov(ret, cpu_env, temp64, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv temp = tcg_const_i32(n);
    gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, temp);
    tcg_temp_free(temp);
}
static inline void
gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv temp = tcg_const_i32(n);
    gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
    tcg_temp_free(temp);
}
static inline void
gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
             uint32_t up_shift, CPUTriCoreState *env)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    tcg_gen_shli_i64(t2, t2, n);

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_sari_i64(t2, t2, up_shift);

    tcg_gen_add_i64(t3, t1, t2);
    tcg_gen_extrl_i64_i32(temp3, t3);
    /* calc v bit */
    tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    if (n == 1) {
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
    }
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
    tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
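
/* For the q-format 16x16 multiplies below, n == 1 selects the one-bit left
   shift; the 0x8000 * 0x8000 case would wrap to 0x80000000, so it is
   corrected by subtracting the setcond result. */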
static inline void
gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_add_d(ret, arg1, temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_adds(ret, arg1, temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
               TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    gen_add64_d(t3, t1, t2);
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
                TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);

    gen_helper_add64_ssov(t1, cpu_env, t1, t2);
    tcg_gen_extr_i64_i32(rl, rh, t1);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
static inline void
gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n, CPUTriCoreState *env)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;

    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    if (n != 0) {
        tcg_gen_shli_i64(t2, t2, 1);
    }
    tcg_gen_add_i64(t4, t1, t2);
    /* calc v bit */
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_andc_i64(t3, t3, t2);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    if (n == 1) {
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
    }
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
static inline void
gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
              uint32_t up_shift)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    tcg_gen_sari_i64(t2, t2, up_shift - n);

    gen_helper_madd32_q_add_ssov(ret, cpu_env, t1, t2);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
static inline void
gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
              TCGv arg3, uint32_t n)
{
    TCGv_i64 r1 = tcg_temp_new_i64();
    TCGv temp = tcg_const_i32(n);

    tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
    gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
    tcg_gen_extr_i64_i32(rl, rh, r1);

    tcg_temp_free_i64(r1);
    tcg_temp_free(temp);
}
/* ret = r2 - (r1 * r3); */
static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t1, t2, t1);

    tcg_gen_extrl_i64_i32(ret, t1);
    /* calc V: result > 0x7fffffff */
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* result < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_msub32_d(ret, r1, r2, temp);
    tcg_temp_free(temp);
}
static inline void
gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();

    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the sub can overflow */
    tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(t3);
    tcg_temp_free(t4);
}
static inline void
gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}
static inline void
gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t3, t2, t1);
    tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
    /* calc V bit, only the sub can overflow, if t1 > t2 */
    tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
static inline void
gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}
static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
{
    TCGv temp = tcg_const_i32(r2);
    gen_add_d(ret, r1, temp);
    tcg_temp_free(temp);
}
/* calculate the carry bit too */
static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, 0);
    /* Addition and set C/V/SV bits */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(t0);
}
static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_add_CC(ret, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv carry = tcg_temp_new_i32();
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, 0);
    tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
    /* Addition, carry and set C/V/SV bits */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
    tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(t0);
    tcg_temp_free(carry);
}
static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_addc_CC(ret, r1, temp);
    tcg_temp_free(temp);
}
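
/* Conditional add/sub: r4 selects via cond whether the result and the V/AV
   flags are committed; the sticky SV/SAV bits are updated only when the
   condition holds, using a mask derived from the condition. */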
static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_add_tl(result, r1, r2);
    /* calc V bit */
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_andc_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    /* set SV bit if condition is true */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    /* calc AV bit */
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    /* set SAV bit if condition is true */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);

    tcg_temp_free(t0);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(result);
    tcg_temp_free(mask);
}
static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
                                 TCGv r3, TCGv r4)
{
    TCGv temp = tcg_const_i32(r2);
    gen_cond_add(cond, r1, temp, r3, r4);
    tcg_temp_free(temp);
}
static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free(result);
}
static inline void
gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();

    tcg_gen_sub_i64(result, r1, r2);
    /* calc v bit */
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_and_i64(t1, t1, t0);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits */
    tcg_gen_extrh_i64_i32(temp, result);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free_i64(result);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv result = tcg_temp_new();
    TCGv temp = tcg_temp_new();

    tcg_gen_sub_tl(result, r1, r2);
    /* calc C bit */
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2);
    /* calc V bit */
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(temp);
}
static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_not_tl(temp, r2);
    gen_addc_CC(ret, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_sub_tl(result, r1, r2);
    /* calc V bit */
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_and_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    /* set SV bit if condition is true */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    /* calc AV bit */
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    /* set SAV bit if condition is true */
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);

    tcg_temp_free(t0);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(result);
    tcg_temp_free(mask);
}
static inline void
gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
           TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_sub_tl, tcg_gen_sub_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_subs(ret_low, r1_low, temp);
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_subs(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_sub64_d(temp64_3, temp64_2, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
static inline void
gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
static inline void
gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
              uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
               uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL: GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); break;
    case MODE_LU: GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); break;
    case MODE_UL: GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); break;
    case MODE_UU: GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); break;
    }
    gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv temp = tcg_const_i32(n);
    gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp);
    tcg_temp_free(temp);
}
static inline void
gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv temp = tcg_const_i32(n);
    gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
    tcg_temp_free(temp);
}
static inline void
gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
             uint32_t up_shift, CPUTriCoreState *env)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);

    tcg_gen_ext_i32_i64(t1, arg1);
    /* if we shift part of the fraction out, we need to round up */
    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
    tcg_gen_sari_i64(t2, t2, up_shift - n);
    tcg_gen_add_i64(t2, t2, t4);

    tcg_gen_sub_i64(t3, t1, t2);
    tcg_gen_extrl_i64_i32(temp3, t3);
    /* calc v bit */
    tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* calc SV bit */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
    tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
    /* calc SAV bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
static inline void
gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_sub_d(ret, arg1, temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_subs(ret, arg1, temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
               TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    gen_sub64_d(t3, t1, t2);
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
                TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);

    gen_helper_sub64_ssov(t1, cpu_env, t1, t2);
    tcg_gen_extr_i64_i32(rl, rh, t1);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
static inline void
gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n, CPUTriCoreState *env)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;

    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    if (n != 0) {
        tcg_gen_shli_i64(t2, t2, 1);
    }
    tcg_gen_sub_i64(t4, t1, t2);
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_and_i64(t3, t3, t2);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    if (n == 1) {
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
    }
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
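/*
 * Note: the xor/and sequence above is the usual signed-overflow test for
 * the 64-bit subtraction t4 = t1 - t2: overflow occurred iff the operands
 * had different signs and the result's sign differs from t1's, so
 * (t4 ^ t1) & (t1 ^ t2) has its top bit set exactly in that case;
 * tcg_gen_extrh_i64_i32 then moves the high word into cpu_PSW_V, whose
 * bit 31 is the architectural V flag.
 */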
static inline void
gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
              uint32_t up_shift)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    /* if we shift part of the fraction out, we need to round up */
    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
    tcg_gen_sari_i64(t3, t2, up_shift - n);
    tcg_gen_add_i64(t3, t3, t4);

    gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
static inline void
gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
              TCGv arg3, uint32_t n)
{
    TCGv_i64 r1 = tcg_temp_new_i64();
    TCGv temp = tcg_const_i32(n);

    tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
    gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
    tcg_gen_extr_i64_i32(rl, rh, r1);

    tcg_temp_free_i64(r1);
    tcg_temp_free(temp);
}
2208 gen_msubad_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2209 TCGv r3
, uint32_t n
, uint32_t mode
)
2211 TCGv temp
= tcg_const_i32(n
);
2212 TCGv temp2
= tcg_temp_new();
2213 TCGv_i64 temp64
= tcg_temp_new_i64();
2216 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2219 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2222 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2225 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2228 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2229 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
2230 tcg_gen_add_tl
, tcg_gen_sub_tl
);
2231 tcg_temp_free(temp
);
2232 tcg_temp_free(temp2
);
2233 tcg_temp_free_i64(temp64
);
2237 gen_msubadm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2238 TCGv r3
, uint32_t n
, uint32_t mode
)
2240 TCGv temp
= tcg_const_i32(n
);
2241 TCGv_i64 temp64
= tcg_temp_new_i64();
2242 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2243 TCGv_i64 temp64_3
= tcg_temp_new_i64();
2246 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2249 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2252 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2255 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2258 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
2259 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2260 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2261 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2262 tcg_gen_shli_i64(temp64
, temp64
, 16);
2264 gen_sub64_d(temp64_2
, temp64_3
, temp64
);
2265 /* write back result */
2266 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
2268 tcg_temp_free(temp
);
2269 tcg_temp_free_i64(temp64
);
2270 tcg_temp_free_i64(temp64_2
);
2271 tcg_temp_free_i64(temp64_3
);
2275 gen_msubadr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2277 TCGv temp
= tcg_const_i32(n
);
2278 TCGv temp2
= tcg_temp_new();
2279 TCGv_i64 temp64
= tcg_temp_new_i64();
2282 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2285 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2288 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2291 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2294 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2295 tcg_gen_shli_tl(temp
, r1
, 16);
2296 gen_helper_subadr_h(ret
, cpu_env
, temp64
, temp
, temp2
);
2298 tcg_temp_free(temp
);
2299 tcg_temp_free(temp2
);
2300 tcg_temp_free_i64(temp64
);
2304 gen_msubads_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2305 TCGv r3
, uint32_t n
, uint32_t mode
)
2307 TCGv temp
= tcg_const_i32(n
);
2308 TCGv temp2
= tcg_temp_new();
2309 TCGv temp3
= tcg_temp_new();
2310 TCGv_i64 temp64
= tcg_temp_new_i64();
2314 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2317 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2320 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2323 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2326 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2327 gen_adds(ret_low
, r1_low
, temp
);
2328 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
2329 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
2330 gen_subs(ret_high
, r1_high
, temp2
);
2331 /* combine v bits */
2332 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2333 /* combine av bits */
2334 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
2336 tcg_temp_free(temp
);
2337 tcg_temp_free(temp2
);
2338 tcg_temp_free(temp3
);
2339 tcg_temp_free_i64(temp64
);
2343 gen_msubadms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2344 TCGv r3
, uint32_t n
, uint32_t mode
)
2346 TCGv temp
= tcg_const_i32(n
);
2347 TCGv_i64 temp64
= tcg_temp_new_i64();
2348 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2352 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2355 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2358 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2361 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2364 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2365 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2366 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2367 tcg_gen_shli_i64(temp64
, temp64
, 16);
2368 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
2370 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
2371 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2373 tcg_temp_free(temp
);
2374 tcg_temp_free_i64(temp64
);
2375 tcg_temp_free_i64(temp64_2
);
2379 gen_msubadr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2381 TCGv temp
= tcg_const_i32(n
);
2382 TCGv temp2
= tcg_temp_new();
2383 TCGv_i64 temp64
= tcg_temp_new_i64();
2386 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2389 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2392 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2395 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2398 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2399 tcg_gen_shli_tl(temp
, r1
, 16);
2400 gen_helper_subadr_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
2402 tcg_temp_free(temp
);
2403 tcg_temp_free(temp2
);
2404 tcg_temp_free_i64(temp64
);
static inline void gen_abs(TCGv ret, TCGv r1)
{
    TCGv temp = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    tcg_gen_neg_tl(temp, r1);
    tcg_gen_movcond_tl(TCG_COND_GE, ret, r1, t0, r1, temp);
    /* overflow can only happen, if r1 = 0x80000000 */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(t0);
}
static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_sub_tl(temp, r2, r1);
    tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);

    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, result, r2);
    tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free(result);
}
static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_absdif(ret, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_absdif_ssov(ret, cpu_env, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv high = tcg_temp_new();
    TCGv low = tcg_temp_new();

    tcg_gen_muls2_tl(low, high, r1, r2);
    tcg_gen_mov_tl(ret, low);
    tcg_gen_sari_tl(low, low, 31);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(high);
    tcg_temp_free(low);
}

static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_mul_i32s(ret, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}

static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
                                 int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_mul_i64s(ret_low, ret_high, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}

static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
                                 int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_mul_i64u(ret_low, ret_high, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_mul_ssov(ret, cpu_env, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_mul_suov(ret, cpu_env, r1, temp);
    tcg_temp_free(temp);
}

/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp);
    tcg_temp_free(temp);
}

static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp);
    tcg_temp_free(temp);
}
static inline void
gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 temp_64 = tcg_temp_new_i64();
    TCGv_i64 temp2_64 = tcg_temp_new_i64();

    if (n == 0) {
        if (up_shift == 32) {
            tcg_gen_muls2_tl(rh, rl, arg1, arg2);
        } else if (up_shift == 16) {
            tcg_gen_ext_i32_i64(temp_64, arg1);
            tcg_gen_ext_i32_i64(temp2_64, arg2);

            tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
            tcg_gen_shri_i64(temp_64, temp_64, up_shift);
            tcg_gen_extr_i64_i32(rl, rh, temp_64);
        } else {
            tcg_gen_muls2_tl(rl, rh, arg1, arg2);
        }
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else { /* n is expected to be 1 */
        tcg_gen_ext_i32_i64(temp_64, arg1);
        tcg_gen_ext_i32_i64(temp2_64, arg2);

        tcg_gen_mul_i64(temp_64, temp_64, temp2_64);

        if (up_shift == 0) {
            tcg_gen_shli_i64(temp_64, temp_64, 1);
        } else {
            tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1);
        }
        tcg_gen_extr_i64_i32(rl, rh, temp_64);
        /* overflow only occurs if r1 = r2 = 0x8000 */
        if (up_shift == 0) {/* result is 64 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
                                0x80000000);
        } else { /* result is 32 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
                                0x80000000);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc sv overflow bit */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    }
    /* calc av overflow bit */
    if (up_shift == 0) {
        tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
        tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    } else {
        tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
        tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
    }
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp_64);
    tcg_temp_free_i64(temp2_64);
}
static void
gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
}
static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_addi_tl(ret, ret, 0x8000);
    } else {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        tcg_gen_addi_tl(ret, ret, 0x8000);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
        tcg_gen_muli_tl(temp, temp, 0x8001);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* cut halfword off */
    tcg_gen_andi_tl(ret, ret, 0xffff0000);

    tcg_temp_free(temp);
}
static inline void
gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}

static inline void
gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}

static inline void
gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}

static inline void
gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}
static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
    tcg_temp_free(temp);
}

static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
    tcg_temp_free(temp);
}
static inline void
gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}

static inline void
gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}

static inline void
gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}

static inline void
gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}
static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
{
    TCGv sat_neg = tcg_const_i32(low);
    TCGv temp = tcg_const_i32(up);

    /* sat_neg = (arg < low ) ? low : arg; */
    tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);

    /* ret = (sat_neg > up ) ? up : sat_neg; */
    tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);

    tcg_temp_free(sat_neg);
    tcg_temp_free(temp);
}

static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
{
    TCGv temp = tcg_const_i32(up);
    /* sat_neg = (arg > up ) ? up : arg; */
    tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
    tcg_temp_free(temp);
}
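/*
 * Example: decode_sr_accu below uses gen_saturate(reg, reg, 0x7f, -0x80)
 * for SAT.B, i.e. the value is first raised to at least -0x80 and then
 * lowered to at most 0x7f, clamping it into the signed byte range.
 */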
static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
{
    if (shift_count == -32) {
        tcg_gen_movi_tl(ret, 0);
    } else if (shift_count >= 0) {
        tcg_gen_shli_tl(ret, r1, shift_count);
    } else {
        tcg_gen_shri_tl(ret, r1, -shift_count);
    }
}

static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
{
    TCGv temp_low, temp_high;

    if (shiftcount == -16) {
        tcg_gen_movi_tl(ret, 0);
    } else {
        temp_high = tcg_temp_new();
        temp_low = tcg_temp_new();

        tcg_gen_andi_tl(temp_low, r1, 0xffff);
        tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
        gen_shi(temp_low, temp_low, shiftcount);
        gen_shi(ret, temp_high, shiftcount);
        tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);

        tcg_temp_free(temp_low);
        tcg_temp_free(temp_high);
    }
}
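/*
 * gen_sh_hi shifts the two halfwords of r1 independently: each half is
 * masked out, shifted with gen_shi (left for a positive count, right for
 * a negative one), and the low half is deposited back into bits [15:0]
 * of the result.
 */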
static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
{
    uint32_t msk, msk_start;
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv t_0 = tcg_const_i32(0);

    if (shift_count == 0) {
        /* Clear PSW.C and PSW.V */
        tcg_gen_movi_tl(cpu_PSW_C, 0);
        tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count == -32) {
        tcg_gen_mov_tl(cpu_PSW_C, r1);
        /* fill ret completely with sign bit */
        tcg_gen_sari_tl(ret, r1, 31);
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else if (shift_count > 0) {
        TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
        TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);

        msk_start = 32 - shift_count;
        msk = ((1 << shift_count) - 1) << msk_start;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        /* calc v/sv bits */
        tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
        tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
        tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
        tcg_gen_shli_tl(ret, r1, shift_count);

        tcg_temp_free(t_max);
        tcg_temp_free(t_min);
    } else {
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        msk = (1 << -shift_count) - 1;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        tcg_gen_sari_tl(ret, r1, -shift_count);
    }
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(t_0);
}
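/*
 * gen_shaci (arithmetic shift by an immediate count) also maintains the
 * PSW: for a positive count the bits shifted out at the top are collected
 * in PSW_C and V/SV are set when the shift overflows the signed range
 * (r1 outside [t_min, t_max]); for a negative count the bits shifted out
 * at the bottom go to PSW_C and V is cleared. AV/SAV are derived from the
 * result as in the other arithmetic helpers.
 */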
static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sha_ssov(ret, cpu_env, r1, r2);
}

static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_shas(ret, r1, temp);
    tcg_temp_free(temp);
}
static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
{
    TCGv low, high;

    if (shift_count == 0) {
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count > 0) {
        low = tcg_temp_new();
        high = tcg_temp_new();

        tcg_gen_andi_tl(high, r1, 0xffff0000);
        tcg_gen_shli_tl(low, r1, shift_count);
        tcg_gen_shli_tl(ret, high, shift_count);
        tcg_gen_deposit_tl(ret, ret, low, 0, 16);

        tcg_temp_free(low);
        tcg_temp_free(high);
    } else {
        low = tcg_temp_new();
        high = tcg_temp_new();

        tcg_gen_ext16s_tl(low, r1);
        tcg_gen_sari_tl(low, low, -shift_count);
        tcg_gen_sari_tl(ret, r1, -shift_count);
        tcg_gen_deposit_tl(ret, ret, low, 0, 16);

        tcg_temp_free(low);
        tcg_temp_free(high);
    }
}
/* ret = {ret[30:0], (r1 cond r2)}; */
static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_shli_tl(temp, ret, 1);
    tcg_gen_setcond_tl(cond, temp2, r1, r2);
    tcg_gen_or_tl(ret, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}

static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_sh_cond(cond, ret, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_add_ssov(ret, cpu_env, r1, r2);
}

static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_add_ssov(ret, cpu_env, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_add_suov(ret, cpu_env, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_ssov(ret, cpu_env, r1, r2);
}

static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_suov(ret, cpu_env, r1, r2);
}
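/*
 * Note: gen_adds/gen_addsi/gen_addsui/gen_subs/gen_subsu are thin wrappers
 * around the *_ssov/*_suov helper calls; the saturation itself and the PSW
 * status-flag updates happen inside those helpers (op_helper.c), not in
 * generated TCG code.
 */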
2989 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
2991 void(*op1
)(TCGv
, TCGv
, TCGv
),
2992 void(*op2
)(TCGv
, TCGv
, TCGv
))
2996 temp1
= tcg_temp_new();
2997 temp2
= tcg_temp_new();
2999 tcg_gen_shri_tl(temp2
, r2
, pos2
);
3000 tcg_gen_shri_tl(temp1
, r1
, pos1
);
3002 (*op1
)(temp1
, temp1
, temp2
);
3003 (*op2
)(temp1
, ret
, temp1
);
3005 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
3007 tcg_temp_free(temp1
);
3008 tcg_temp_free(temp2
);
3011 /* ret = r1[pos1] op1 r2[pos2]; */
3012 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
3014 void(*op1
)(TCGv
, TCGv
, TCGv
))
3018 temp1
= tcg_temp_new();
3019 temp2
= tcg_temp_new();
3021 tcg_gen_shri_tl(temp2
, r2
, pos2
);
3022 tcg_gen_shri_tl(temp1
, r1
, pos1
);
3024 (*op1
)(ret
, temp1
, temp2
);
3026 tcg_gen_andi_tl(ret
, ret
, 0x1);
3028 tcg_temp_free(temp1
);
3029 tcg_temp_free(temp2
);
3032 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
3033 void(*op
)(TCGv
, TCGv
, TCGv
))
3035 TCGv temp
= tcg_temp_new();
3036 TCGv temp2
= tcg_temp_new();
3037 /* temp = (arg1 cond arg2 )*/
3038 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
3040 tcg_gen_andi_tl(temp2
, ret
, 0x1);
3041 /* temp = temp insn temp2 */
3042 (*op
)(temp
, temp
, temp2
);
3043 /* ret = {ret[31:1], temp} */
3044 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
3046 tcg_temp_free(temp
);
3047 tcg_temp_free(temp2
);
3051 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
3052 void(*op
)(TCGv
, TCGv
, TCGv
))
3054 TCGv temp
= tcg_const_i32(con
);
3055 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
3056 tcg_temp_free(temp
);
3059 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
3060 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
3062 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
3063 tcg_gen_neg_tl(ret
, ret
);
3066 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
3068 TCGv b0
= tcg_temp_new();
3069 TCGv b1
= tcg_temp_new();
3070 TCGv b2
= tcg_temp_new();
3071 TCGv b3
= tcg_temp_new();
3074 tcg_gen_andi_tl(b0
, r1
, 0xff);
3075 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
3078 tcg_gen_andi_tl(b1
, r1
, 0xff00);
3079 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
3082 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
3083 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
3086 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
3087 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
3090 tcg_gen_or_tl(ret
, b0
, b1
);
3091 tcg_gen_or_tl(ret
, ret
, b2
);
3092 tcg_gen_or_tl(ret
, ret
, b3
);
3100 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
3102 TCGv h0
= tcg_temp_new();
3103 TCGv h1
= tcg_temp_new();
3106 tcg_gen_andi_tl(h0
, r1
, 0xffff);
3107 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
3110 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
3111 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
3114 tcg_gen_or_tl(ret
, h0
, h1
);
/* mask = ((1 << width) - 1) << pos;
   ret = (r1 & ~mask) | ((r2 << pos) & mask); */
static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
{
    TCGv mask = tcg_temp_new();
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_movi_tl(mask, 1);
    tcg_gen_shl_tl(mask, mask, width);
    tcg_gen_subi_tl(mask, mask, 1);
    tcg_gen_shl_tl(mask, mask, pos);

    tcg_gen_shl_tl(temp, r2, pos);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_andc_tl(temp2, r1, mask);
    tcg_gen_or_tl(ret, temp, temp2);

    tcg_temp_free(mask);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
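/*
 * Example: for width = 8 and pos = 4 the computed mask is
 * ((1 << 8) - 1) << 4 = 0xff0, so ret = (r1 & ~0xff0) | ((r2 << 4) & 0xff0),
 * i.e. bits [11:4] of r1 are replaced by bits [7:0] of r2.
 */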
static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    gen_helper_bsplit(temp, r1);
    tcg_gen_extr_i64_i32(rl, rh, temp);

    tcg_temp_free_i64(temp);
}

static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    gen_helper_unpack(temp, r1);
    tcg_gen_extr_i64_i32(rl, rh, temp);

    tcg_temp_free_i64(temp);
}
3163 gen_dvinit_b(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
3165 TCGv_i64 ret
= tcg_temp_new_i64();
3167 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
3168 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
3170 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
3172 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
3174 tcg_temp_free_i64(ret
);
3178 gen_dvinit_h(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
3180 TCGv_i64 ret
= tcg_temp_new_i64();
3182 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
3183 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
3185 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
3187 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
3189 tcg_temp_free_i64(ret
);
3192 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
3194 TCGv temp
= tcg_temp_new();
3196 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
3197 tcg_gen_xor_tl(temp
, temp
, arg_low
);
3198 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
3199 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
3200 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
3202 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3203 tcg_gen_movi_tl(cpu_PSW_V
, 0);
3204 tcg_temp_free(temp
);
3207 static void gen_calc_usb_mulr_h(TCGv arg
)
3209 TCGv temp
= tcg_temp_new();
3211 tcg_gen_add_tl(temp
, arg
, arg
);
3212 tcg_gen_xor_tl(temp
, temp
, arg
);
3213 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
3214 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
3216 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3218 tcg_gen_movi_tl(cpu_PSW_V
, 0);
3219 tcg_temp_free(temp
);
3222 /* helpers for generating program flow micro-ops */
3224 static inline void gen_save_pc(target_ulong pc
)
3226 tcg_gen_movi_tl(cpu_PC
, pc
);
3229 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
3231 TranslationBlock
*tb
;
3233 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
3234 likely(!ctx
->singlestep_enabled
)) {
3237 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
3240 if (ctx
->singlestep_enabled
) {
3241 /* raise exception debug */
3247 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
3248 TCGv r2
, int16_t address
)
3250 TCGLabel
*jumpLabel
= gen_new_label();
3251 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
3253 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
3255 gen_set_label(jumpLabel
);
3256 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
3259 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
3260 int r2
, int16_t address
)
3262 TCGv temp
= tcg_const_i32(r2
);
3263 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
3264 tcg_temp_free(temp
);
3267 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
3269 TCGLabel
*l1
= gen_new_label();
3271 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
3272 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
3273 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
3275 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
3278 static void gen_fcall_save_ctx(DisasContext
*ctx
)
3280 TCGv temp
= tcg_temp_new();
3282 tcg_gen_addi_tl(temp
, cpu_gpr_a
[10], -4);
3283 tcg_gen_qemu_st_tl(cpu_gpr_a
[11], temp
, ctx
->mem_idx
, MO_LESL
);
3284 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3285 tcg_gen_mov_tl(cpu_gpr_a
[10], temp
);
3287 tcg_temp_free(temp
);
3290 static void gen_fret(DisasContext
*ctx
)
3292 TCGv temp
= tcg_temp_new();
3294 tcg_gen_andi_tl(temp
, cpu_gpr_a
[11], ~0x1);
3295 tcg_gen_qemu_ld_tl(cpu_gpr_a
[11], cpu_gpr_a
[10], ctx
->mem_idx
, MO_LESL
);
3296 tcg_gen_addi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], 4);
3297 tcg_gen_mov_tl(cpu_PC
, temp
);
3299 ctx
->bstate
= BS_BRANCH
;
3301 tcg_temp_free(temp
);
3304 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
3305 int r2
, int32_t constant
, int32_t offset
)
3311 /* SB-format jumps */
3314 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3316 case OPC1_32_B_CALL
:
3317 case OPC1_16_SB_CALL
:
3318 gen_helper_1arg(call
, ctx
->next_pc
);
3319 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3322 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
3324 case OPC1_16_SB_JNZ
:
3325 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
3327 /* SBC-format jumps */
3328 case OPC1_16_SBC_JEQ
:
3329 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
3331 case OPC1_16_SBC_JNE
:
3332 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
3334 /* SBRN-format jumps */
3335 case OPC1_16_SBRN_JZ_T
:
3336 temp
= tcg_temp_new();
3337 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
3338 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3339 tcg_temp_free(temp
);
3341 case OPC1_16_SBRN_JNZ_T
:
3342 temp
= tcg_temp_new();
3343 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
3344 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3345 tcg_temp_free(temp
);
3347 /* SBR-format jumps */
3348 case OPC1_16_SBR_JEQ
:
3349 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
3352 case OPC1_16_SBR_JNE
:
3353 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
3356 case OPC1_16_SBR_JNZ
:
3357 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
3359 case OPC1_16_SBR_JNZ_A
:
3360 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3362 case OPC1_16_SBR_JGEZ
:
3363 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
3365 case OPC1_16_SBR_JGTZ
:
3366 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
3368 case OPC1_16_SBR_JLEZ
:
3369 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
3371 case OPC1_16_SBR_JLTZ
:
3372 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
3374 case OPC1_16_SBR_JZ
:
3375 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
3377 case OPC1_16_SBR_JZ_A
:
3378 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3380 case OPC1_16_SBR_LOOP
:
3381 gen_loop(ctx
, r1
, offset
* 2 - 32);
3383 /* SR-format jumps */
3385 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
3388 case OPC2_32_SYS_RET
:
3389 case OPC2_16_SR_RET
:
3390 gen_helper_ret(cpu_env
);
3394 case OPC1_32_B_CALLA
:
3395 gen_helper_1arg(call
, ctx
->next_pc
);
3396 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3398 case OPC1_32_B_FCALL
:
3399 gen_fcall_save_ctx(ctx
);
3400 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3402 case OPC1_32_B_FCALLA
:
3403 gen_fcall_save_ctx(ctx
);
3404 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3407 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3410 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3413 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3414 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3417 case OPCM_32_BRC_EQ_NEQ
:
3418 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3419 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3421 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3424 case OPCM_32_BRC_GE
:
3425 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3426 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3428 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3429 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3433 case OPCM_32_BRC_JLT
:
3434 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3435 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3437 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3438 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3442 case OPCM_32_BRC_JNE
:
3443 temp
= tcg_temp_new();
3444 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3445 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3446 /* subi is unconditional */
3447 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3448 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3450 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3451 /* addi is unconditional */
3452 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3453 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3455 tcg_temp_free(temp
);
3458 case OPCM_32_BRN_JTT
:
3459 n
= MASK_OP_BRN_N(ctx
->opcode
);
3461 temp
= tcg_temp_new();
3462 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3464 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3465 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3467 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3469 tcg_temp_free(temp
);
3472 case OPCM_32_BRR_EQ_NEQ
:
3473 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3474 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3477 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3481 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3482 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3483 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3486 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3490 case OPCM_32_BRR_GE
:
3491 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3492 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3495 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3499 case OPCM_32_BRR_JLT
:
3500 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3501 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3504 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3508 case OPCM_32_BRR_LOOP
:
3509 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3510 gen_loop(ctx
, r2
, offset
* 2);
3512 /* OPC2_32_BRR_LOOPU */
3513 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3516 case OPCM_32_BRR_JNE
:
3517 temp
= tcg_temp_new();
3518 temp2
= tcg_temp_new();
3519 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3520 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3521 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3522 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3523 /* subi is unconditional */
3524 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3525 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3527 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3528 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3529 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3530 /* addi is unconditional */
3531 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3532 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3534 tcg_temp_free(temp
);
3535 tcg_temp_free(temp2
);
3537 case OPCM_32_BRR_JNZ
:
3538 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3539 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3541 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3545 printf("Branch Error at %x\n", ctx
->pc
);
3547 ctx
->bstate
= BS_BRANCH
;
3552 * Functions for decoding instructions
3555 static void decode_src_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int op1
)
3561 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3562 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3565 case OPC1_16_SRC_ADD
:
3566 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3568 case OPC1_16_SRC_ADD_A15
:
3569 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3571 case OPC1_16_SRC_ADD_15A
:
3572 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3574 case OPC1_16_SRC_ADD_A
:
3575 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3577 case OPC1_16_SRC_CADD
:
3578 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3581 case OPC1_16_SRC_CADDN
:
3582 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3585 case OPC1_16_SRC_CMOV
:
3586 temp
= tcg_const_tl(0);
3587 temp2
= tcg_const_tl(const4
);
3588 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3589 temp2
, cpu_gpr_d
[r1
]);
3590 tcg_temp_free(temp
);
3591 tcg_temp_free(temp2
);
3593 case OPC1_16_SRC_CMOVN
:
3594 temp
= tcg_const_tl(0);
3595 temp2
= tcg_const_tl(const4
);
3596 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3597 temp2
, cpu_gpr_d
[r1
]);
3598 tcg_temp_free(temp
);
3599 tcg_temp_free(temp2
);
3601 case OPC1_16_SRC_EQ
:
3602 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3605 case OPC1_16_SRC_LT
:
3606 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3609 case OPC1_16_SRC_MOV
:
3610 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3612 case OPC1_16_SRC_MOV_A
:
3613 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3614 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3616 case OPC1_16_SRC_MOV_E
:
3617 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3618 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3619 tcg_gen_sari_tl(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], 31);
3620 } /* TODO: else raise illegal opcode trap */
3622 case OPC1_16_SRC_SH
:
3623 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3625 case OPC1_16_SRC_SHA
:
3626 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3631 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3636 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3637 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3640 case OPC1_16_SRR_ADD
:
3641 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3643 case OPC1_16_SRR_ADD_A15
:
3644 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3646 case OPC1_16_SRR_ADD_15A
:
3647 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3649 case OPC1_16_SRR_ADD_A
:
3650 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3652 case OPC1_16_SRR_ADDS
:
3653 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3655 case OPC1_16_SRR_AND
:
3656 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3658 case OPC1_16_SRR_CMOV
:
3659 temp
= tcg_const_tl(0);
3660 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3661 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3662 tcg_temp_free(temp
);
3664 case OPC1_16_SRR_CMOVN
:
3665 temp
= tcg_const_tl(0);
3666 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3667 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3668 tcg_temp_free(temp
);
3670 case OPC1_16_SRR_EQ
:
3671 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3674 case OPC1_16_SRR_LT
:
3675 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3678 case OPC1_16_SRR_MOV
:
3679 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3681 case OPC1_16_SRR_MOV_A
:
3682 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3684 case OPC1_16_SRR_MOV_AA
:
3685 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3687 case OPC1_16_SRR_MOV_D
:
3688 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3690 case OPC1_16_SRR_MUL
:
3691 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3693 case OPC1_16_SRR_OR
:
3694 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3696 case OPC1_16_SRR_SUB
:
3697 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3699 case OPC1_16_SRR_SUB_A15B
:
3700 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3702 case OPC1_16_SRR_SUB_15AB
:
3703 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3705 case OPC1_16_SRR_SUBS
:
3706 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3708 case OPC1_16_SRR_XOR
:
3709 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3714 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3718 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3719 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3722 case OPC1_16_SSR_ST_A
:
3723 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3725 case OPC1_16_SSR_ST_A_POSTINC
:
3726 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3727 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3729 case OPC1_16_SSR_ST_B
:
3730 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3732 case OPC1_16_SSR_ST_B_POSTINC
:
3733 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3734 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3736 case OPC1_16_SSR_ST_H
:
3737 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3739 case OPC1_16_SSR_ST_H_POSTINC
:
3740 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3741 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3743 case OPC1_16_SSR_ST_W
:
3744 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3746 case OPC1_16_SSR_ST_W_POSTINC
:
3747 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3748 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3753 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3757 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3760 case OPC1_16_SC_AND
:
3761 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3763 case OPC1_16_SC_BISR
:
3764 gen_helper_1arg(bisr
, const16
& 0xff);
3766 case OPC1_16_SC_LD_A
:
3767 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3769 case OPC1_16_SC_LD_W
:
3770 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3772 case OPC1_16_SC_MOV
:
3773 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3776 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3778 case OPC1_16_SC_ST_A
:
3779 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3781 case OPC1_16_SC_ST_W
:
3782 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3784 case OPC1_16_SC_SUB_A
:
3785 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3790 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3794 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3795 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3799 case OPC1_16_SLR_LD_A
:
3800 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3802 case OPC1_16_SLR_LD_A_POSTINC
:
3803 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3804 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3806 case OPC1_16_SLR_LD_BU
:
3807 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3809 case OPC1_16_SLR_LD_BU_POSTINC
:
3810 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3811 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3813 case OPC1_16_SLR_LD_H
:
3814 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3816 case OPC1_16_SLR_LD_H_POSTINC
:
3817 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3818 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3820 case OPC1_16_SLR_LD_W
:
3821 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3823 case OPC1_16_SLR_LD_W_POSTINC
:
3824 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3825 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3830 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3835 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3836 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3840 case OPC1_16_SRO_LD_A
:
3841 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3843 case OPC1_16_SRO_LD_BU
:
3844 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3846 case OPC1_16_SRO_LD_H
:
3847 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
3849 case OPC1_16_SRO_LD_W
:
3850 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3852 case OPC1_16_SRO_ST_A
:
3853 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3855 case OPC1_16_SRO_ST_B
:
3856 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3858 case OPC1_16_SRO_ST_H
:
3859 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3861 case OPC1_16_SRO_ST_W
:
3862 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3867 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
3870 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3873 case OPC2_16_SR_NOP
:
3875 case OPC2_16_SR_RET
:
3876 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3878 case OPC2_16_SR_RFE
:
3879 gen_helper_rfe(cpu_env
);
3881 ctx
->bstate
= BS_BRANCH
;
3883 case OPC2_16_SR_DEBUG
:
3884 /* raise EXCP_DEBUG */
3886 case OPC2_16_SR_FRET
:
3891 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
3897 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3898 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3901 case OPC2_16_SR_RSUB
:
3902 /* overflow only if r1 = -0x80000000 */
3903 temp
= tcg_const_i32(-0x80000000);
3905 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
3906 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3908 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3910 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3912 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3913 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3915 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3916 tcg_temp_free(temp
);
3918 case OPC2_16_SR_SAT_B
:
3919 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3921 case OPC2_16_SR_SAT_BU
:
3922 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3924 case OPC2_16_SR_SAT_H
:
3925 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3927 case OPC2_16_SR_SAT_HU
:
3928 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3933 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
3941 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3943 /* handle ADDSC.A opcode only being 6 bit long */
3944 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3945 op1
= OPC1_16_SRRS_ADDSC_A
;
3949 case OPC1_16_SRC_ADD
:
3950 case OPC1_16_SRC_ADD_A15
:
3951 case OPC1_16_SRC_ADD_15A
:
3952 case OPC1_16_SRC_ADD_A
:
3953 case OPC1_16_SRC_CADD
:
3954 case OPC1_16_SRC_CADDN
:
3955 case OPC1_16_SRC_CMOV
:
3956 case OPC1_16_SRC_CMOVN
:
3957 case OPC1_16_SRC_EQ
:
3958 case OPC1_16_SRC_LT
:
3959 case OPC1_16_SRC_MOV
:
3960 case OPC1_16_SRC_MOV_A
:
3961 case OPC1_16_SRC_MOV_E
:
3962 case OPC1_16_SRC_SH
:
3963 case OPC1_16_SRC_SHA
:
3964 decode_src_opc(env
, ctx
, op1
);
3967 case OPC1_16_SRR_ADD
:
3968 case OPC1_16_SRR_ADD_A15
:
3969 case OPC1_16_SRR_ADD_15A
:
3970 case OPC1_16_SRR_ADD_A
:
3971 case OPC1_16_SRR_ADDS
:
3972 case OPC1_16_SRR_AND
:
3973 case OPC1_16_SRR_CMOV
:
3974 case OPC1_16_SRR_CMOVN
:
3975 case OPC1_16_SRR_EQ
:
3976 case OPC1_16_SRR_LT
:
3977 case OPC1_16_SRR_MOV
:
3978 case OPC1_16_SRR_MOV_A
:
3979 case OPC1_16_SRR_MOV_AA
:
3980 case OPC1_16_SRR_MOV_D
:
3981 case OPC1_16_SRR_MUL
:
3982 case OPC1_16_SRR_OR
:
3983 case OPC1_16_SRR_SUB
:
3984 case OPC1_16_SRR_SUB_A15B
:
3985 case OPC1_16_SRR_SUB_15AB
:
3986 case OPC1_16_SRR_SUBS
:
3987 case OPC1_16_SRR_XOR
:
3988 decode_srr_opc(ctx
, op1
);
3991 case OPC1_16_SSR_ST_A
:
3992 case OPC1_16_SSR_ST_A_POSTINC
:
3993 case OPC1_16_SSR_ST_B
:
3994 case OPC1_16_SSR_ST_B_POSTINC
:
3995 case OPC1_16_SSR_ST_H
:
3996 case OPC1_16_SSR_ST_H_POSTINC
:
3997 case OPC1_16_SSR_ST_W
:
3998 case OPC1_16_SSR_ST_W_POSTINC
:
3999 decode_ssr_opc(ctx
, op1
);
4002 case OPC1_16_SRRS_ADDSC_A
:
4003 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
4004 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
4005 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
4006 temp
= tcg_temp_new();
4007 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
4008 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
4009 tcg_temp_free(temp
);
4012 case OPC1_16_SLRO_LD_A
:
4013 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4014 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4015 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4017 case OPC1_16_SLRO_LD_BU
:
4018 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4019 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4020 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
4022 case OPC1_16_SLRO_LD_H
:
4023 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4024 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4025 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
4027 case OPC1_16_SLRO_LD_W
:
4028 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4029 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4030 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4033 case OPC1_16_SB_CALL
:
4035 case OPC1_16_SB_JNZ
:
4037 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
4038 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
4041 case OPC1_16_SBC_JEQ
:
4042 case OPC1_16_SBC_JNE
:
4043 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
4044 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
4045 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
4048 case OPC1_16_SBRN_JNZ_T
:
4049 case OPC1_16_SBRN_JZ_T
:
4050 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
4051 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
4052 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
4055 case OPC1_16_SBR_JEQ
:
4056 case OPC1_16_SBR_JGEZ
:
4057 case OPC1_16_SBR_JGTZ
:
4058 case OPC1_16_SBR_JLEZ
:
4059 case OPC1_16_SBR_JLTZ
:
4060 case OPC1_16_SBR_JNE
:
4061 case OPC1_16_SBR_JNZ
:
4062 case OPC1_16_SBR_JNZ_A
:
4063 case OPC1_16_SBR_JZ
:
4064 case OPC1_16_SBR_JZ_A
:
4065 case OPC1_16_SBR_LOOP
:
4066 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
4067 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
4068 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
4071 case OPC1_16_SC_AND
:
4072 case OPC1_16_SC_BISR
:
4073 case OPC1_16_SC_LD_A
:
4074 case OPC1_16_SC_LD_W
:
4075 case OPC1_16_SC_MOV
:
4077 case OPC1_16_SC_ST_A
:
4078 case OPC1_16_SC_ST_W
:
4079 case OPC1_16_SC_SUB_A
:
4080 decode_sc_opc(ctx
, op1
);
4083 case OPC1_16_SLR_LD_A
:
4084 case OPC1_16_SLR_LD_A_POSTINC
:
4085 case OPC1_16_SLR_LD_BU
:
4086 case OPC1_16_SLR_LD_BU_POSTINC
:
4087 case OPC1_16_SLR_LD_H
:
4088 case OPC1_16_SLR_LD_H_POSTINC
:
4089 case OPC1_16_SLR_LD_W
:
4090 case OPC1_16_SLR_LD_W_POSTINC
:
4091 decode_slr_opc(ctx
, op1
);
4094 case OPC1_16_SRO_LD_A
:
4095 case OPC1_16_SRO_LD_BU
:
4096 case OPC1_16_SRO_LD_H
:
4097 case OPC1_16_SRO_LD_W
:
4098 case OPC1_16_SRO_ST_A
:
4099 case OPC1_16_SRO_ST_B
:
4100 case OPC1_16_SRO_ST_H
:
4101 case OPC1_16_SRO_ST_W
:
4102 decode_sro_opc(ctx
, op1
);
4105 case OPC1_16_SSRO_ST_A
:
4106 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4107 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4108 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4110 case OPC1_16_SSRO_ST_B
:
4111 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4112 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4113 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
4115 case OPC1_16_SSRO_ST_H
:
4116 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4117 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4118 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
4120 case OPC1_16_SSRO_ST_W
:
4121 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4122 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4123 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4126 case OPCM_16_SR_SYSTEM
:
4127 decode_sr_system(env
, ctx
);
4129 case OPCM_16_SR_ACCU
:
4130 decode_sr_accu(env
, ctx
);
4133 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
4134 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
4136 case OPC1_16_SR_NOT
:
4137 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
4138 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
4144 * 32 bit instructions
4148 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
4155 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4156 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4157 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4159 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4162 case OPC2_32_ABS_LD_A
:
4163 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4165 case OPC2_32_ABS_LD_D
:
4166 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4168 case OPC2_32_ABS_LD_DA
:
4169 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4171 case OPC2_32_ABS_LD_W
:
4172 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4176 tcg_temp_free(temp
);
4179 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
4186 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4187 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4188 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4190 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4193 case OPC2_32_ABS_LD_B
:
4194 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
4196 case OPC2_32_ABS_LD_BU
:
4197 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
4199 case OPC2_32_ABS_LD_H
:
4200 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
4202 case OPC2_32_ABS_LD_HU
:
4203 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
4207 tcg_temp_free(temp
);
static void decode_abs_ldst_swap(CPUTriCoreState *env, DisasContext *ctx)
{
    int32_t op2;
    int32_t r1;
    uint32_t address;
    TCGv temp;

    r1 = MASK_OP_ABS_S1D(ctx->opcode);
    address = MASK_OP_ABS_OFF18(ctx->opcode);
    op2 = MASK_OP_ABS_OP2(ctx->opcode);

    temp = tcg_const_i32(EA_ABS_FORMAT(address));

    switch (op2) {
    case OPC2_32_ABS_LDMST:
        gen_ldmst(ctx, r1, temp);
        break;
    case OPC2_32_ABS_SWAP_W:
        gen_swap(ctx, r1, temp);
        break;
    }

    tcg_temp_free(temp);
}
static void decode_abs_ldst_context(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int32_t off18;

    off18 = MASK_OP_ABS_OFF18(ctx->opcode);
    op2 = MASK_OP_ABS_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_ABS_LDLCX:
        gen_helper_1arg(ldlcx, EA_ABS_FORMAT(off18));
        break;
    case OPC2_32_ABS_LDUCX:
        gen_helper_1arg(lducx, EA_ABS_FORMAT(off18));
        break;
    case OPC2_32_ABS_STLCX:
        gen_helper_1arg(stlcx, EA_ABS_FORMAT(off18));
        break;
    case OPC2_32_ABS_STUCX:
        gen_helper_1arg(stucx, EA_ABS_FORMAT(off18));
        break;
    }
}
static void decode_abs_store(CPUTriCoreState *env, DisasContext *ctx)
{
    int32_t op2;
    int32_t r1;
    uint32_t address;
    TCGv temp;

    r1 = MASK_OP_ABS_S1D(ctx->opcode);
    address = MASK_OP_ABS_OFF18(ctx->opcode);
    op2 = MASK_OP_ABS_OP2(ctx->opcode);

    temp = tcg_const_i32(EA_ABS_FORMAT(address));

    switch (op2) {
    case OPC2_32_ABS_ST_A:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
        break;
    case OPC2_32_ABS_ST_D:
        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
        break;
    case OPC2_32_ABS_ST_DA:
        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
        break;
    case OPC2_32_ABS_ST_W:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
        break;
    }

    tcg_temp_free(temp);
}
static void decode_abs_storeb_h(CPUTriCoreState *env, DisasContext *ctx)
{
    int32_t op2;
    int32_t r1;
    uint32_t address;
    TCGv temp;

    r1 = MASK_OP_ABS_S1D(ctx->opcode);
    address = MASK_OP_ABS_OFF18(ctx->opcode);
    op2 = MASK_OP_ABS_OP2(ctx->opcode);

    temp = tcg_const_i32(EA_ABS_FORMAT(address));

    switch (op2) {
    case OPC2_32_ABS_ST_B:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
        break;
    case OPC2_32_ABS_ST_H:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
        break;
    }

    tcg_temp_free(temp);
}
static void decode_bit_andacc(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;

    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
    op2 = MASK_OP_BIT_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BIT_AND_AND_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl, &tcg_gen_and_tl);
        break;
    case OPC2_32_BIT_AND_ANDN_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
        break;
    case OPC2_32_BIT_AND_NOR_T:
        if (TCG_TARGET_HAS_andc_i32) {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
        } else {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_and_tl);
        }
        break;
    case OPC2_32_BIT_AND_OR_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl, &tcg_gen_and_tl);
        break;
    }
}
static void decode_bit_logical_t(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;

    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
    op2 = MASK_OP_BIT_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BIT_AND_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl);
        break;
    case OPC2_32_BIT_ANDN_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl);
        break;
    case OPC2_32_BIT_NOR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nor_tl);
        break;
    case OPC2_32_BIT_OR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl);
        break;
    }
}
static void decode_bit_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;
    TCGv temp;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    temp = tcg_temp_new();

    tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2);
    if (op2 == OPC2_32_BIT_INSN_T) {
        tcg_gen_not_tl(temp, temp);
    }
    tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
    tcg_temp_free(temp);
}
static void decode_bit_logical_t2(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BIT_NAND_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nand_tl);
        break;
    case OPC2_32_BIT_ORN_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_orc_tl);
        break;
    case OPC2_32_BIT_XNOR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_eqv_tl);
        break;
    case OPC2_32_BIT_XOR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_xor_tl);
        break;
    }
}
static void decode_bit_orand(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BIT_OR_AND_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl);
        break;
    case OPC2_32_BIT_OR_ANDN_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
        break;
    case OPC2_32_BIT_OR_NOR_T:
        if (TCG_TARGET_HAS_orc_i32) {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
        } else {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl);
        }
        break;
    case OPC2_32_BIT_OR_OR_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl);
        break;
    }
}
static void decode_bit_sh_logic1(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;
    TCGv temp;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_BIT_SH_AND_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl);
        break;
    case OPC2_32_BIT_SH_ANDN_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl);
        break;
    case OPC2_32_BIT_SH_NOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nor_tl);
        break;
    case OPC2_32_BIT_SH_OR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl);
        break;
    }
    tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
    tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
    tcg_temp_free(temp);
}
static void decode_bit_sh_logic2(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;
    TCGv temp;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_BIT_SH_NAND_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nand_tl);
        break;
    case OPC2_32_BIT_SH_ORN_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_orc_tl);
        break;
    case OPC2_32_BIT_SH_XNOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_eqv_tl);
        break;
    case OPC2_32_BIT_SH_XOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_xor_tl);
        break;
    }
    tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
    tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
    tcg_temp_free(temp);
}
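
/*
 * BO-format loads/stores follow.  Judging from the decoders below, the
 * *_post_pre_base functions handle the base+short-offset, post-increment
 * and pre-increment addressing modes, while the *_bitreverse_circular
 * variants handle the bit-reverse and circular modes.
 */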
static void decode_bo_addrmode_post_pre_base(CPUTriCoreState *env,
                                             DisasContext *ctx)
{
    uint32_t op2;
    uint32_t off10;
    int32_t r1, r2;
    TCGv temp;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BO_CACHEA_WI_SHORTOFF:
    case OPC2_32_BO_CACHEA_W_SHORTOFF:
    case OPC2_32_BO_CACHEA_I_SHORTOFF:
        /* instruction to access the cache */
        break;
    case OPC2_32_BO_CACHEA_WI_POSTINC:
    case OPC2_32_BO_CACHEA_W_POSTINC:
    case OPC2_32_BO_CACHEA_I_POSTINC:
        /* instruction to access the cache, but we still need to handle
           the addressing mode */
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_CACHEA_WI_PREINC:
    case OPC2_32_BO_CACHEA_W_PREINC:
    case OPC2_32_BO_CACHEA_I_PREINC:
        /* instruction to access the cache, but we still need to handle
           the addressing mode */
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_CACHEI_WI_SHORTOFF:
    case OPC2_32_BO_CACHEI_W_SHORTOFF:
        /* TODO: Raise illegal opcode trap,
           if !tricore_feature(TRICORE_FEATURE_131) */
        break;
    case OPC2_32_BO_CACHEI_W_POSTINC:
    case OPC2_32_BO_CACHEI_WI_POSTINC:
        if (tricore_feature(env, TRICORE_FEATURE_131)) {
            tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        } /* TODO: else raise illegal opcode trap */
        break;
    case OPC2_32_BO_CACHEI_W_PREINC:
    case OPC2_32_BO_CACHEI_WI_PREINC:
        if (tricore_feature(env, TRICORE_FEATURE_131)) {
            tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        } /* TODO: else raise illegal opcode trap */
        break;
    case OPC2_32_BO_ST_A_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
        break;
    case OPC2_32_BO_ST_A_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LESL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_A_PREINC:
        gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
        break;
    case OPC2_32_BO_ST_B_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
        break;
    case OPC2_32_BO_ST_B_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_UB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_B_PREINC:
        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
        break;
    case OPC2_32_BO_ST_D_SHORTOFF:
        gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
                            off10, ctx);
        break;
    case OPC2_32_BO_ST_D_POSTINC:
        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_D_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_ST_DA_SHORTOFF:
        gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
                            off10, ctx);
        break;
    case OPC2_32_BO_ST_DA_POSTINC:
        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_DA_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_ST_H_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        break;
    case OPC2_32_BO_ST_H_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_H_PREINC:
        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        break;
    case OPC2_32_BO_ST_Q_SHORTOFF:
        temp = tcg_temp_new();
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_ST_Q_POSTINC:
        temp = tcg_temp_new();
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_ST_Q_PREINC:
        temp = tcg_temp_new();
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_ST_W_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    case OPC2_32_BO_ST_W_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_W_PREINC:
        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    }
}
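
/*
 * For the bit-reverse and circular modes the effective address is a[r2]
 * plus the low 16 bits of a[r2+1]; gen_helper_br_update and
 * gen_helper_circ_update then advance the index kept in a[r2+1]
 * (description derived from the code below).
 */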
static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState *env,
                                                   DisasContext *ctx)
{
    uint32_t op2;
    uint32_t off10;
    int32_t r1, r2;
    TCGv temp, temp2, temp3;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    temp3 = tcg_const_i32(off10);

    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);

    switch (op2) {
    case OPC2_32_BO_CACHEA_WI_BR:
    case OPC2_32_BO_CACHEA_W_BR:
    case OPC2_32_BO_CACHEA_I_BR:
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_CACHEA_WI_CIRC:
    case OPC2_32_BO_CACHEA_W_CIRC:
    case OPC2_32_BO_CACHEA_I_CIRC:
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_A_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_A_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_B_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_B_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_D_BR:
        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_D_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_DA_BR:
        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_DA_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_H_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_H_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_Q_BR:
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_Q_CIRC:
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_W_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_W_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
}
static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState *env,
                                                DisasContext *ctx)
{
    uint32_t op2;
    uint32_t off10;
    int32_t r1, r2;
    TCGv temp;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BO_LD_A_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    case OPC2_32_BO_LD_A_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_A_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    case OPC2_32_BO_LD_B_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
        break;
    case OPC2_32_BO_LD_B_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_SB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_B_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
        break;
    case OPC2_32_BO_LD_BU_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
        break;
    case OPC2_32_BO_LD_BU_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_UB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_BU_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
        break;
    case OPC2_32_BO_LD_D_SHORTOFF:
        gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
                            off10, ctx);
        break;
    case OPC2_32_BO_LD_D_POSTINC:
        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_D_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_LD_DA_SHORTOFF:
        gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
                            off10, ctx);
        break;
    case OPC2_32_BO_LD_DA_POSTINC:
        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_DA_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_LD_H_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
        break;
    case OPC2_32_BO_LD_H_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LESW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_H_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
        break;
    case OPC2_32_BO_LD_HU_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        break;
    case OPC2_32_BO_LD_HU_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_HU_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        break;
    case OPC2_32_BO_LD_Q_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        break;
    case OPC2_32_BO_LD_Q_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_Q_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        break;
    case OPC2_32_BO_LD_W_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    case OPC2_32_BO_LD_W_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_W_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    }
}
static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState *env,
                                                      DisasContext *ctx)
{
    uint32_t op2;
    uint32_t off10;
    int32_t r1, r2;
    TCGv temp, temp2, temp3;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    temp3 = tcg_const_i32(off10);

    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);

    switch (op2) {
    case OPC2_32_BO_LD_A_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_A_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_B_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_B_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_BU_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_BU_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], tem2 := NULL, 0, 0); /* placeholder removed */
        break;
    case OPC2_32_BO_LD_D_BR:
        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_D_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_DA_BR:
        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_DA_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_H_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_H_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_HU_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_HU_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_Q_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_Q_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_W_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_W_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
}
static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState *env,
                                                   DisasContext *ctx)
{
    uint32_t op2;
    uint32_t off10;
    int32_t r1, r2;
    TCGv temp, temp2;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_BO_LDLCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_ldlcx(cpu_env, temp);
        break;
    case OPC2_32_BO_LDMST_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_ldmst(ctx, r1, temp);
        break;
    case OPC2_32_BO_LDMST_POSTINC:
        gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LDMST_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
        break;
    case OPC2_32_BO_LDUCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_lducx(cpu_env, temp);
        break;
    case OPC2_32_BO_LEA_SHORTOFF:
        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_STLCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_stlcx(cpu_env, temp);
        break;
    case OPC2_32_BO_STUCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_stucx(cpu_env, temp);
        break;
    case OPC2_32_BO_SWAP_W_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_swap(ctx, r1, temp);
        break;
    case OPC2_32_BO_SWAP_W_POSTINC:
        gen_swap(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_SWAP_W_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_swap(ctx, r1, cpu_gpr_a[r2]);
        break;
    case OPC2_32_BO_CMPSWAP_W_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_cmpswap(ctx, r1, temp);
        break;
    case OPC2_32_BO_CMPSWAP_W_POSTINC:
        gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_CMPSWAP_W_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
        break;
    case OPC2_32_BO_SWAPMSK_W_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_swapmsk(ctx, r1, temp);
        break;
    case OPC2_32_BO_SWAPMSK_W_POSTINC:
        gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_SWAPMSK_W_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState *env,
                                                         DisasContext *ctx)
{
    uint32_t op2;
    uint32_t off10;
    int32_t r1, r2;
    TCGv temp, temp2, temp3;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    temp3 = tcg_const_i32(off10);

    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);

    switch (op2) {
    case OPC2_32_BO_LDMST_BR:
        gen_ldmst(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LDMST_CIRC:
        gen_ldmst(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_SWAP_W_BR:
        gen_swap(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_SWAP_W_CIRC:
        gen_swap(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_CMPSWAP_W_BR:
        gen_cmpswap(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_CMPSWAP_W_CIRC:
        gen_cmpswap(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_SWAPMSK_W_BR:
        gen_swapmsk(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_SWAPMSK_W_CIRC:
        gen_swapmsk(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
}
static void decode_bol_opc(CPUTriCoreState *env, DisasContext *ctx, int32_t op1)
{
    int r1, r2;
    int32_t address;
    TCGv temp;

    r1 = MASK_OP_BOL_S1D(ctx->opcode);
    r2 = MASK_OP_BOL_S2(ctx->opcode);
    address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode);

    switch (op1) {
    case OPC1_32_BOL_LD_A_LONGOFF:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
        tcg_temp_free(temp);
        break;
    case OPC1_32_BOL_LD_W_LONGOFF:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
        tcg_temp_free(temp);
        break;
    case OPC1_32_BOL_LEA_LONGOFF:
        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
        break;
    case OPC1_32_BOL_ST_A_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], address, MO_LEUL);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_ST_W_LONGOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUL);
        break;
    case OPC1_32_BOL_LD_B_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_LD_BU_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_UB);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_LD_H_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_LD_HU_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUW);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_ST_B_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_ST_H_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    }
}
static void decode_rc_logical_shift(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2;
    int32_t const9;
    TCGv temp;

    r2 = MASK_OP_RC_D(ctx->opcode);
    r1 = MASK_OP_RC_S1(ctx->opcode);
    const9 = MASK_OP_RC_CONST9(ctx->opcode);
    op2 = MASK_OP_RC_OP2(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RC_AND:
        tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ANDN:
        tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
        break;
    case OPC2_32_RC_NAND:
        tcg_gen_movi_tl(temp, const9);
        tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_NOR:
        tcg_gen_movi_tl(temp, const9);
        tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_OR:
        tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ORN:
        tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
        break;
    case OPC2_32_RC_SH:
        const9 = sextract32(const9, 0, 6);
        gen_shi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_H:
        const9 = sextract32(const9, 0, 5);
        gen_sh_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SHA:
        const9 = sextract32(const9, 0, 6);
        gen_shaci(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SHA_H:
        const9 = sextract32(const9, 0, 5);
        gen_sha_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SHAS:
        gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_XNOR:
        tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RC_XOR:
        tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    }
    tcg_temp_free(temp);
}
static void decode_rc_accumulator(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2;
    int32_t const9;
    TCGv temp;

    r2 = MASK_OP_RC_D(ctx->opcode);
    r1 = MASK_OP_RC_S1(ctx->opcode);
    const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);

    op2 = MASK_OP_RC_OP2(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RC_ABSDIF:
        gen_absdifi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ABSDIFS:
        gen_absdifsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ADD:
        gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ADDC:
        gen_addci_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ADDS:
        gen_addsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ADDS_U:
        gen_addsui(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ADDX:
        gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_AND_EQ:
        gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_AND_GE:
        gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_AND_GE_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_AND_LT:
        gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_AND_LT_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_AND_NE:
        gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_EQ:
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_EQANY_B:
        gen_eqany_bi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_EQANY_H:
        gen_eqany_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_GE:
        tcg_gen_setcondi_tl(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_GE_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_LT:
        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_LT_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MAX:
        tcg_gen_movi_tl(temp, const9);
        tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
                           cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_MAX_U:
        tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
        tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
                           cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_MIN:
        tcg_gen_movi_tl(temp, const9);
        tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
                           cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_MIN_U:
        tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
        tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
                           cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_NE:
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_OR_EQ:
        gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_OR_GE:
        gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_OR_GE_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_OR_LT:
        gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_OR_LT_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_OR_NE:
        gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_RSUB:
        tcg_gen_movi_tl(temp, const9);
        gen_sub_d(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RC_RSUBS:
        tcg_gen_movi_tl(temp, const9);
        gen_subs(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RC_RSUBS_U:
        tcg_gen_movi_tl(temp, const9);
        gen_subsu(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RC_SH_EQ:
        gen_sh_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_GE:
        gen_sh_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_GE_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_sh_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_LT:
        gen_sh_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_LT_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_sh_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_NE:
        gen_sh_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_XOR_EQ:
        gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    case OPC2_32_RC_XOR_GE:
        gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    case OPC2_32_RC_XOR_GE_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    case OPC2_32_RC_XOR_LT:
        gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    case OPC2_32_RC_XOR_LT_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    case OPC2_32_RC_XOR_NE:
        gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    }
    tcg_temp_free(temp);
}
static void decode_rc_serviceroutine(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t const9;

    op2 = MASK_OP_RC_OP2(ctx->opcode);
    const9 = MASK_OP_RC_CONST9(ctx->opcode);

    switch (op2) {
    case OPC2_32_RC_BISR:
        gen_helper_1arg(bisr, const9);
        break;
    case OPC2_32_RC_SYSCALL:
        /* TODO: Add exception generation */
        break;
    }
}
static void decode_rc_mul(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2;
    int32_t const9;

    r2 = MASK_OP_RC_D(ctx->opcode);
    r1 = MASK_OP_RC_S1(ctx->opcode);
    const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);

    op2 = MASK_OP_RC_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_RC_MUL_32:
        gen_muli_i32s(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MUL_64:
        gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MULS_32:
        gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MUL_U_64:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MULS_U_32:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_mulsui_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    }
}
static void decode_rcpw_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2;
    int32_t pos, width, const4;
    TCGv temp;

    op2 = MASK_OP_RCPW_OP2(ctx->opcode);
    r1 = MASK_OP_RCPW_S1(ctx->opcode);
    r2 = MASK_OP_RCPW_D(ctx->opcode);
    const4 = MASK_OP_RCPW_CONST4(ctx->opcode);
    width = MASK_OP_RCPW_WIDTH(ctx->opcode);
    pos = MASK_OP_RCPW_POS(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCPW_IMASK:
        /* if pos + width > 31 undefined result */
        if (pos + width <= 31) {
            tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos);
            tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos));
        }
        break;
    case OPC2_32_RCPW_INSERT:
        /* if pos + width > 32 undefined result */
        if (pos + width <= 32) {
            temp = tcg_const_i32(const4);
            tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
            tcg_temp_free(temp);
        }
        break;
    }
}
static void decode_rcrw_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t width, const4;
    TCGv temp, temp2, temp3;

    op2 = MASK_OP_RCRW_OP2(ctx->opcode);
    r1 = MASK_OP_RCRW_S1(ctx->opcode);
    r3 = MASK_OP_RCRW_S3(ctx->opcode);
    r4 = MASK_OP_RCRW_D(ctx->opcode);
    width = MASK_OP_RCRW_WIDTH(ctx->opcode);
    const4 = MASK_OP_RCRW_CONST4(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RCRW_IMASK:
        tcg_gen_andi_tl(temp, cpu_gpr_d[r4], 0x1f);
        tcg_gen_movi_tl(temp2, (1 << width) - 1);
        tcg_gen_shl_tl(cpu_gpr_d[r3 + 1], temp2, temp);
        tcg_gen_movi_tl(temp2, const4);
        tcg_gen_shl_tl(cpu_gpr_d[r3], temp2, temp);
        break;
    case OPC2_32_RCRW_INSERT:
        temp3 = tcg_temp_new();

        tcg_gen_movi_tl(temp, width);
        tcg_gen_movi_tl(temp2, const4);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r4], 0x1f);
        gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp2, temp, temp3);

        tcg_temp_free(temp3);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static void decode_rcr_cond_select(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t const9;
    TCGv temp, temp2;

    op2 = MASK_OP_RCR_OP2(ctx->opcode);
    r1 = MASK_OP_RCR_S1(ctx->opcode);
    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
    r3 = MASK_OP_RCR_S3(ctx->opcode);
    r4 = MASK_OP_RCR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCR_CADD:
        gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
                      cpu_gpr_d[r4]);
        break;
    case OPC2_32_RCR_CADDN:
        gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
                      cpu_gpr_d[r4]);
        break;
    case OPC2_32_RCR_SEL:
        temp = tcg_const_i32(0);
        temp2 = tcg_const_i32(const9);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], temp2);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
    case OPC2_32_RCR_SELN:
        temp = tcg_const_i32(0);
        temp2 = tcg_const_i32(const9);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], temp2);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
    }
}
static void decode_rcr_madd(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t const9;

    op2 = MASK_OP_RCR_OP2(ctx->opcode);
    r1 = MASK_OP_RCR_S1(ctx->opcode);
    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
    r3 = MASK_OP_RCR_S3(ctx->opcode);
    r4 = MASK_OP_RCR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCR_MADD_32:
        gen_maddi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MADD_64:
        gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MADDS_32:
        gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MADDS_64:
        gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MADD_U_64:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MADDS_U_32:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_maddsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MADDS_U_64:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    }
}
static void decode_rcr_msub(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t const9;

    op2 = MASK_OP_RCR_OP2(ctx->opcode);
    r1 = MASK_OP_RCR_S1(ctx->opcode);
    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
    r3 = MASK_OP_RCR_S3(ctx->opcode);
    r4 = MASK_OP_RCR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCR_MSUB_32:
        gen_msubi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MSUB_64:
        gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MSUBS_32:
        gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MSUBS_64:
        gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MSUB_U_64:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MSUBS_U_32:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_msubsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MSUBS_U_64:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    }
}
static void decode_rlc_opc(CPUTriCoreState *env, DisasContext *ctx,
                           uint32_t op1)
{
    int32_t const16;
    int r1, r2;

    const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode);
    r1 = MASK_OP_RLC_S1(ctx->opcode);
    r2 = MASK_OP_RLC_D(ctx->opcode);

    switch (op1) {
    case OPC1_32_RLC_ADDI:
        gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16);
        break;
    case OPC1_32_RLC_ADDIH:
        gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16);
        break;
    case OPC1_32_RLC_ADDIH_A:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
        break;
    case OPC1_32_RLC_MFCR:
        const16 = MASK_OP_RLC_CONST16(ctx->opcode);
        gen_mfcr(env, cpu_gpr_d[r2], const16);
        break;
    case OPC1_32_RLC_MOV:
        tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
        break;
    case OPC1_32_RLC_MOV_64:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            if ((r2 & 0x1) != 0) {
                /* TODO: raise OPD trap */
            }
            tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
            tcg_gen_movi_tl(cpu_gpr_d[r2+1], const16 >> 15);
        } else {
            /* TODO: raise illegal opcode trap */
        }
        break;
    case OPC1_32_RLC_MOV_U:
        const16 = MASK_OP_RLC_CONST16(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
        break;
    case OPC1_32_RLC_MOV_H:
        tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16);
        break;
    case OPC1_32_RLC_MOVH_A:
        tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16);
        break;
    case OPC1_32_RLC_MTCR:
        const16 = MASK_OP_RLC_CONST16(ctx->opcode);
        gen_mtcr(env, ctx, cpu_gpr_d[r1], const16);
        break;
    }
}
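
/*
 * RR-format register-register ALU group: plain operations are emitted as
 * inline TCG ops, saturating variants go through the *_ssov/*_suov helpers,
 * and packed byte/half-word variants go through the corresponding
 * gen_helper_* calls (summary of the dispatch in decode_rr_accumulator
 * below).
 */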
5874 static void decode_rr_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
5879 r3
= MASK_OP_RR_D(ctx
->opcode
);
5880 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5881 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5882 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5885 case OPC2_32_RR_ABS
:
5886 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5888 case OPC2_32_RR_ABS_B
:
5889 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5891 case OPC2_32_RR_ABS_H
:
5892 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5894 case OPC2_32_RR_ABSDIF
:
5895 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5897 case OPC2_32_RR_ABSDIF_B
:
5898 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5901 case OPC2_32_RR_ABSDIF_H
:
5902 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5905 case OPC2_32_RR_ABSDIFS
:
5906 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5909 case OPC2_32_RR_ABSDIFS_H
:
5910 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5913 case OPC2_32_RR_ABSS
:
5914 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5916 case OPC2_32_RR_ABSS_H
:
5917 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5919 case OPC2_32_RR_ADD
:
5920 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5922 case OPC2_32_RR_ADD_B
:
5923 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5925 case OPC2_32_RR_ADD_H
:
5926 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5928 case OPC2_32_RR_ADDC
:
5929 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5931 case OPC2_32_RR_ADDS
:
5932 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5934 case OPC2_32_RR_ADDS_H
:
5935 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5938 case OPC2_32_RR_ADDS_HU
:
5939 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5942 case OPC2_32_RR_ADDS_U
:
5943 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5946 case OPC2_32_RR_ADDX
:
5947 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5949 case OPC2_32_RR_AND_EQ
:
5950 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5951 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5953 case OPC2_32_RR_AND_GE
:
5954 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5955 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5957 case OPC2_32_RR_AND_GE_U
:
5958 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5959 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5961 case OPC2_32_RR_AND_LT
:
5962 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5963 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5965 case OPC2_32_RR_AND_LT_U
:
5966 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5967 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5969 case OPC2_32_RR_AND_NE
:
5970 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5971 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5974 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5977 case OPC2_32_RR_EQ_B
:
5978 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5980 case OPC2_32_RR_EQ_H
:
5981 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5983 case OPC2_32_RR_EQ_W
:
5984 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5986 case OPC2_32_RR_EQANY_B
:
5987 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5989 case OPC2_32_RR_EQANY_H
:
5990 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_GE:
        tcg_gen_setcond_tl(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_GE_U:
        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT:
        tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_U:
        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_B:
        gen_helper_lt_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_BU:
        gen_helper_lt_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_H:
        gen_helper_lt_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_HU:
        gen_helper_lt_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_W:
        gen_cond_w(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_WU:
        gen_cond_w(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX:
        tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX_U:
        tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX_B:
        gen_helper_max_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX_BU:
        gen_helper_max_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX_H:
        gen_helper_max_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX_HU:
        gen_helper_max_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN:
        tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN_U:
        tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN_B:
        gen_helper_min_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN_BU:
        gen_helper_min_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN_H:
        gen_helper_min_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN_HU:
        gen_helper_min_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MOV:
        tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_NE:
        tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_OR_EQ:
        gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_OR_GE:
        gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_OR_GE_U:
        gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_OR_LT:
        gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_OR_LT_U:
        gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_OR_NE:
        gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_SAT_B:
        gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7f, -0x80);
        break;
    case OPC2_32_RR_SAT_BU:
        gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xff);
        break;
    case OPC2_32_RR_SAT_H:
        gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7fff, -0x8000);
        break;
    case OPC2_32_RR_SAT_HU:
        gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xffff);
        break;
    case OPC2_32_RR_SH_EQ:
        gen_sh_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_GE:
        gen_sh_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_GE_U:
        gen_sh_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_LT:
        gen_sh_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_LT_U:
        gen_sh_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_NE:
        gen_sh_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUB:
        gen_sub_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUB_B:
        gen_helper_sub_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUB_H:
        gen_helper_sub_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBC:
        gen_subc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBS:
        gen_subs(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBS_U:
        gen_subsu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBS_H:
        gen_helper_sub_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                              cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBS_HU:
        gen_helper_sub_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                              cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBX:
        gen_sub_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_XOR_EQ:
        gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    case OPC2_32_RR_XOR_GE:
        gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    case OPC2_32_RR_XOR_GE_U:
        gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    case OPC2_32_RR_XOR_LT:
        gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    case OPC2_32_RR_XOR_LT_U:
        gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    case OPC2_32_RR_XOR_NE:
        gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    }
}
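/* RR format: logical operations, CLO/CLS/CLZ bit counting and shifts */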
static void decode_rr_logical_shift(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r3, r2, r1;
    TCGv temp;

    r3 = MASK_OP_RR_D(ctx->opcode);
    r2 = MASK_OP_RR_S2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);

    temp = tcg_temp_new();
    op2 = MASK_OP_RR_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_AND:
        tcg_gen_and_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ANDN:
        tcg_gen_andc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_CLO:
        gen_helper_clo(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLO_H:
        gen_helper_clo_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLS:
        gen_helper_cls(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLS_H:
        gen_helper_cls_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLZ:
        gen_helper_clz(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLZ_H:
        gen_helper_clz_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_NAND:
        tcg_gen_nand_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_NOR:
        tcg_gen_nor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_OR:
        tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ORN:
        tcg_gen_orc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH:
        gen_helper_sh(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_H:
        gen_helper_sh_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SHA:
        gen_helper_sha(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SHA_H:
        gen_helper_sha_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SHAS:
        gen_shas(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_XNOR:
        tcg_gen_eqv_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_XOR:
        tcg_gen_xor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    }
    tcg_temp_free(temp);
}
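/* RR format: address-register arithmetic, moves and compares */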
static void decode_rr_address(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2, n;
    int r1, r2, r3;
    TCGv temp;

    op2 = MASK_OP_RR_OP2(ctx->opcode);
    r3 = MASK_OP_RR_D(ctx->opcode);
    r2 = MASK_OP_RR_S2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);
    n = MASK_OP_RR_N(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_ADD_A:
        tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_ADDSC_A:
        temp = tcg_temp_new();
        tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n);
        tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
    case OPC2_32_RR_ADDSC_AT:
        temp = tcg_temp_new();
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3);
        tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp);
        tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC);
        tcg_temp_free(temp);
        break;
    case OPC2_32_RR_EQ_A:
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_EQZ:
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
        break;
    case OPC2_32_RR_GE_A:
        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_LT_A:
        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_MOV_A:
        tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MOV_AA:
        tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_MOV_D:
        tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_NE_A:
        tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_NEZ_A:
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
        break;
    case OPC2_32_RR_SUB_A:
        tcg_gen_sub_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
        break;
    }
}
static void decode_rr_idirect(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1;

    op2 = MASK_OP_RR_OP2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_JI:
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
        break;
    case OPC2_32_RR_JLI:
        tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
        break;
    case OPC2_32_RR_CALLI:
        gen_helper_1arg(call, ctx->next_pc);
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
        break;
    case OPC2_32_RR_FCALLI:
        gen_fcall_save_ctx(ctx);
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
        break;
    }
    tcg_gen_exit_tb(0);
    ctx->bstate = BS_BRANCH;
}
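/* RR format: BMERGE/BSPLIT, divide-init steps, PARITY, UNPACK, CRC32 and DIV */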
static void decode_rr_divide(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;

    TCGv temp, temp2, temp3;

    op2 = MASK_OP_RR_OP2(ctx->opcode);
    r3 = MASK_OP_RR_D(ctx->opcode);
    r2 = MASK_OP_RR_S2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_BMERGE:
        gen_helper_bmerge(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_BSPLIT:
        gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_DVINIT_B:
        gen_dvinit_b(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_DVINIT_BU:
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        temp3 = tcg_temp_new();

        tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 8);
        /* reset av */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        if (!tricore_feature(env, TRICORE_FEATURE_131)) {
            /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
            tcg_gen_neg_tl(temp, temp3);
            /* use cpu_PSW_AV to compare against 0 */
            tcg_gen_movcond_tl(TCG_COND_LT, temp, temp3, cpu_PSW_AV,
                               temp, temp3);
            tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]);
            tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV,
                               temp2, cpu_gpr_d[r2]);
            tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
        } else {
            /* overflow = (D[b] == 0) */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sv */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* write result */
        tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24);
        tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        tcg_temp_free(temp3);
        break;
    case OPC2_32_RR_DVINIT_H:
        gen_dvinit_h(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_DVINIT_HU:
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        temp3 = tcg_temp_new();

        tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 16);
        /* reset av */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        if (!tricore_feature(env, TRICORE_FEATURE_131)) {
            /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
            tcg_gen_neg_tl(temp, temp3);
            /* use cpu_PSW_AV to compare against 0 */
            tcg_gen_movcond_tl(TCG_COND_LT, temp, temp3, cpu_PSW_AV,
                               temp, temp3);
            tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]);
            tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV,
                               temp2, cpu_gpr_d[r2]);
            tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
        } else {
            /* overflow = (D[b] == 0) */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sv */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* write result */
        tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16);
        tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        tcg_temp_free(temp3);
        break;
    case OPC2_32_RR_DVINIT:
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        /* overflow = ((D[b] == 0) ||
                      ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff);
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0);
        tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sv */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* reset av */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        /* write result */
        tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        /* sign extend to high reg */
        tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
    case OPC2_32_RR_DVINIT_U:
        /* overflow = (D[b] == 0) */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sv */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* reset av */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        /* write result */
        tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        /* zero extend to high reg*/
        tcg_gen_movi_tl(cpu_gpr_d[r3+1], 0);
        break;
    case OPC2_32_RR_PARITY:
        gen_helper_parity(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_UNPACK:
        gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CRC32:
        if (tricore_feature(env, TRICORE_FEATURE_161)) {
            gen_helper_crc32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        } /* TODO: else raise illegal opcode trap */
        break;
    case OPC2_32_RR_DIV:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3+1],
                          cpu_gpr_d[r1], cpu_gpr_d[r2]);
        } /* TODO: else raise illegal opcode trap */
        break;
    case OPC2_32_RR_DIV_U:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3+1],
                          cpu_gpr_d[r1], cpu_gpr_d[r2]);
        } /* TODO: else raise illegal opcode trap */
        break;
    }
}
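/* RR1 format: packed halfword multiplies (MUL.H, MULM.H, MULR.H) */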
static void decode_rr1_mul(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;

    int r1, r2, r3;
    TCGv n;
    TCGv_i64 temp64;

    r1 = MASK_OP_RR1_S1(ctx->opcode);
    r2 = MASK_OP_RR1_S2(ctx->opcode);
    r3 = MASK_OP_RR1_D(ctx->opcode);
    n = tcg_const_i32(MASK_OP_RR1_N(ctx->opcode));
    op2 = MASK_OP_RR1_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR1_MUL_H_32_LL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MUL_H_32_LU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MUL_H_32_UL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MUL_H_32_UU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MULM_H_64_LL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MULM_H_64_LU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MULM_H_64_UL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MULM_H_64_UU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MULR_H_16_LL:
        GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    case OPC2_32_RR1_MULR_H_16_LU:
        GEN_HELPER_LU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    case OPC2_32_RR1_MULR_H_16_UL:
        GEN_HELPER_UL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    case OPC2_32_RR1_MULR_H_16_UU:
        GEN_HELPER_UU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    }
    tcg_temp_free(n);
}
static void decode_rr1_mulq(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    uint32_t n;

    TCGv temp, temp2;

    r1 = MASK_OP_RR1_S1(ctx->opcode);
    r2 = MASK_OP_RR1_S2(ctx->opcode);
    r3 = MASK_OP_RR1_D(ctx->opcode);
    n  = MASK_OP_RR1_N(ctx->opcode);
    op2 = MASK_OP_RR1_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RR1_MUL_Q_32:
        gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2], n, 32);
        break;
    case OPC2_32_RR1_MUL_Q_64:
        gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                  n, 0);
        break;
    case OPC2_32_RR1_MUL_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
        break;
    case OPC2_32_RR1_MUL_Q_64_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
        break;
    case OPC2_32_RR1_MUL_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
        break;
    case OPC2_32_RR1_MUL_Q_64_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
        break;
    case OPC2_32_RR1_MUL_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RR1_MUL_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RR1_MULR_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RR1_MULR_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static void decode_rr2_mul(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;

    op2 = MASK_OP_RR2_OP2(ctx->opcode);
    r1  = MASK_OP_RR2_S1(ctx->opcode);
    r2  = MASK_OP_RR2_S2(ctx->opcode);
    r3  = MASK_OP_RR2_D(ctx->opcode);
    switch (op2) {
    case OPC2_32_RR2_MUL_32:
        gen_mul_i32s(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MUL_64:
        gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MULS_32:
        gen_helper_mul_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                            cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MUL_U_64:
        gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MULS_U_32:
        gen_helper_mul_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                            cpu_gpr_d[r2]);
        break;
    }
}
static void decode_rrpw_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int32_t pos, width;

    op2 = MASK_OP_RRPW_OP2(ctx->opcode);
    r1 = MASK_OP_RRPW_S1(ctx->opcode);
    r2 = MASK_OP_RRPW_S2(ctx->opcode);
    r3 = MASK_OP_RRPW_D(ctx->opcode);
    pos = MASK_OP_RRPW_POS(ctx->opcode);
    width = MASK_OP_RRPW_WIDTH(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRPW_EXTR:
        if (pos + width <= 31) {
            /* optimize special cases */
            if ((pos == 0) && (width == 8)) {
                tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
            } else if ((pos == 0) && (width == 16)) {
                tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
            } else {
                tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 32 - pos - width);
                tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width);
            }
        }
        break;
    case OPC2_32_RRPW_EXTR_U:
        if (width == 0) {
            tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
        } else {
            tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos);
            tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32-width));
        }
        break;
    case OPC2_32_RRPW_IMASK:
        if (pos + width <= 31) {
            tcg_gen_movi_tl(cpu_gpr_d[r3+1], ((1u << width) - 1) << pos);
            tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
        }
        break;
    case OPC2_32_RRPW_INSERT:
        if (pos + width <= 31) {
            tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                               pos, width);
        }
        break;
    }
}
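/* RRR format: conditional add/sub and select */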
static void decode_rrr_cond_select(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    TCGv temp;

    op2 = MASK_OP_RRR_OP2(ctx->opcode);
    r1 = MASK_OP_RRR_S1(ctx->opcode);
    r2 = MASK_OP_RRR_S2(ctx->opcode);
    r3 = MASK_OP_RRR_S3(ctx->opcode);
    r4 = MASK_OP_RRR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR_CADD:
        gen_cond_add(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
                     cpu_gpr_d[r4], cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CADDN:
        gen_cond_add(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CSUB:
        gen_cond_sub(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CSUBN:
        gen_cond_sub(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_SEL:
        temp = tcg_const_i32(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], cpu_gpr_d[r2]);
        tcg_temp_free(temp);
        break;
    case OPC2_32_RRR_SELN:
        temp = tcg_const_i32(0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], cpu_gpr_d[r2]);
        tcg_temp_free(temp);
        break;
    }
}
static void decode_rrr_divide(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;

    op2 = MASK_OP_RRR_OP2(ctx->opcode);
    r1 = MASK_OP_RRR_S1(ctx->opcode);
    r2 = MASK_OP_RRR_S2(ctx->opcode);
    r3 = MASK_OP_RRR_S3(ctx->opcode);
    r4 = MASK_OP_RRR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR_DVADJ:
        GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_DVSTEP:
        GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_DVSTEP_U:
        GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMAX:
        GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMAX_U:
        GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMIN:
        GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMIN_U:
        GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_PACK:
        gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3],
                        cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
        break;
    }
}
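/* RRR2 format: 32/64-bit multiply-accumulate and multiply-subtract */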
static void decode_rrr2_madd(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4;

    op2 = MASK_OP_RRR2_OP2(ctx->opcode);
    r1 = MASK_OP_RRR2_S1(ctx->opcode);
    r2 = MASK_OP_RRR2_S2(ctx->opcode);
    r3 = MASK_OP_RRR2_S3(ctx->opcode);
    r4 = MASK_OP_RRR2_D(ctx->opcode);
    switch (op2) {
    case OPC2_32_RRR2_MADD_32:
        gen_madd32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADD_64:
        gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_32:
        gen_helper_madd32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_64:
        gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADD_U_64:
        gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_U_32:
        gen_helper_madd32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_U_64:
        gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    }
}
static void decode_rrr2_msub(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4;

    op2 = MASK_OP_RRR2_OP2(ctx->opcode);
    r1 = MASK_OP_RRR2_S1(ctx->opcode);
    r2 = MASK_OP_RRR2_S2(ctx->opcode);
    r3 = MASK_OP_RRR2_S3(ctx->opcode);
    r4 = MASK_OP_RRR2_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR2_MSUB_32:
        gen_msub32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUB_64:
        gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_32:
        gen_helper_msub32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_64:
        gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUB_U_64:
        gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_U_32:
        gen_helper_msub32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_U_64:
        gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    }
}
6947 static void decode_rrr1_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
6950 uint32_t r1
, r2
, r3
, r4
, n
;
6952 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6953 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6954 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6955 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6956 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6957 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6960 case OPC2_32_RRR1_MADD_H_LL
:
6961 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6962 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6964 case OPC2_32_RRR1_MADD_H_LU
:
6965 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6966 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6968 case OPC2_32_RRR1_MADD_H_UL
:
6969 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6970 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6972 case OPC2_32_RRR1_MADD_H_UU
:
6973 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6974 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6976 case OPC2_32_RRR1_MADDS_H_LL
:
6977 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6978 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6980 case OPC2_32_RRR1_MADDS_H_LU
:
6981 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6982 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6984 case OPC2_32_RRR1_MADDS_H_UL
:
6985 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6986 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6988 case OPC2_32_RRR1_MADDS_H_UU
:
6989 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6990 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6992 case OPC2_32_RRR1_MADDM_H_LL
:
6993 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6994 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6996 case OPC2_32_RRR1_MADDM_H_LU
:
6997 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6998 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7000 case OPC2_32_RRR1_MADDM_H_UL
:
7001 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7002 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7004 case OPC2_32_RRR1_MADDM_H_UU
:
7005 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7006 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7008 case OPC2_32_RRR1_MADDMS_H_LL
:
7009 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7010 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7012 case OPC2_32_RRR1_MADDMS_H_LU
:
7013 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7014 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7016 case OPC2_32_RRR1_MADDMS_H_UL
:
7017 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7018 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7020 case OPC2_32_RRR1_MADDMS_H_UU
:
7021 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7022 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7024 case OPC2_32_RRR1_MADDR_H_LL
:
7025 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7026 cpu_gpr_d
[r2
], n
, MODE_LL
);
7028 case OPC2_32_RRR1_MADDR_H_LU
:
7029 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7030 cpu_gpr_d
[r2
], n
, MODE_LU
);
7032 case OPC2_32_RRR1_MADDR_H_UL
:
7033 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7034 cpu_gpr_d
[r2
], n
, MODE_UL
);
7036 case OPC2_32_RRR1_MADDR_H_UU
:
7037 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7038 cpu_gpr_d
[r2
], n
, MODE_UU
);
7040 case OPC2_32_RRR1_MADDRS_H_LL
:
7041 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7042 cpu_gpr_d
[r2
], n
, MODE_LL
);
7044 case OPC2_32_RRR1_MADDRS_H_LU
:
7045 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7046 cpu_gpr_d
[r2
], n
, MODE_LU
);
7048 case OPC2_32_RRR1_MADDRS_H_UL
:
7049 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7050 cpu_gpr_d
[r2
], n
, MODE_UL
);
7052 case OPC2_32_RRR1_MADDRS_H_UU
:
7053 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7054 cpu_gpr_d
[r2
], n
, MODE_UU
);
7059 static void decode_rrr1_maddq_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7062 uint32_t r1
, r2
, r3
, r4
, n
;
7065 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7066 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7067 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7068 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7069 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7070 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7072 temp
= tcg_const_i32(n
);
7073 temp2
= tcg_temp_new();
7076 case OPC2_32_RRR1_MADD_Q_32
:
7077 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7078 cpu_gpr_d
[r2
], n
, 32, env
);
7080 case OPC2_32_RRR1_MADD_Q_64
:
7081 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7082 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7085 case OPC2_32_RRR1_MADD_Q_32_L
:
7086 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7087 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7090 case OPC2_32_RRR1_MADD_Q_64_L
:
7091 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7092 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7093 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7096 case OPC2_32_RRR1_MADD_Q_32_U
:
7097 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7098 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7101 case OPC2_32_RRR1_MADD_Q_64_U
:
7102 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7103 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7104 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7107 case OPC2_32_RRR1_MADD_Q_32_LL
:
7108 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7109 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7110 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7112 case OPC2_32_RRR1_MADD_Q_64_LL
:
7113 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7114 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7115 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7116 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7118 case OPC2_32_RRR1_MADD_Q_32_UU
:
7119 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7120 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7121 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7123 case OPC2_32_RRR1_MADD_Q_64_UU
:
7124 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7125 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7126 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7127 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7129 case OPC2_32_RRR1_MADDS_Q_32
:
7130 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7131 cpu_gpr_d
[r2
], n
, 32);
7133 case OPC2_32_RRR1_MADDS_Q_64
:
7134 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7135 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7138 case OPC2_32_RRR1_MADDS_Q_32_L
:
7139 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7140 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7143 case OPC2_32_RRR1_MADDS_Q_64_L
:
7144 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7145 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7146 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7149 case OPC2_32_RRR1_MADDS_Q_32_U
:
7150 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7151 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7154 case OPC2_32_RRR1_MADDS_Q_64_U
:
7155 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7156 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7157 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7160 case OPC2_32_RRR1_MADDS_Q_32_LL
:
7161 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7162 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7163 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7165 case OPC2_32_RRR1_MADDS_Q_64_LL
:
7166 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7167 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7168 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7169 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7171 case OPC2_32_RRR1_MADDS_Q_32_UU
:
7172 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7173 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7174 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7176 case OPC2_32_RRR1_MADDS_Q_64_UU
:
7177 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7178 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7179 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7180 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7182 case OPC2_32_RRR1_MADDR_H_64_UL
:
7183 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7184 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7186 case OPC2_32_RRR1_MADDRS_H_64_UL
:
7187 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7188 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7190 case OPC2_32_RRR1_MADDR_Q_32_LL
:
7191 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7192 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7193 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7195 case OPC2_32_RRR1_MADDR_Q_32_UU
:
7196 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7197 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7198 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7200 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
7201 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7202 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7203 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7205 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
7206 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7207 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7208 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7211 tcg_temp_free(temp
);
7212 tcg_temp_free(temp2
);
7215 static void decode_rrr1_maddsu_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7218 uint32_t r1
, r2
, r3
, r4
, n
;
7220 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7221 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7222 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7223 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7224 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7225 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7228 case OPC2_32_RRR1_MADDSU_H_32_LL
:
7229 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7230 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7232 case OPC2_32_RRR1_MADDSU_H_32_LU
:
7233 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7234 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7236 case OPC2_32_RRR1_MADDSU_H_32_UL
:
7237 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7238 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7240 case OPC2_32_RRR1_MADDSU_H_32_UU
:
7241 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7242 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7244 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
7245 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7246 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7249 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
7250 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7251 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7254 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
7255 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7256 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7259 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
7260 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7261 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7264 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
7265 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7266 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7269 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
7270 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7271 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7274 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
7275 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7276 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7279 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
7280 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7281 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7284 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
7285 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7286 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7289 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
7290 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7291 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7294 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
7295 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7296 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7299 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
7300 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7301 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7304 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
7305 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7306 cpu_gpr_d
[r2
], n
, MODE_LL
);
7308 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
7309 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7310 cpu_gpr_d
[r2
], n
, MODE_LU
);
7312 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
7313 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7314 cpu_gpr_d
[r2
], n
, MODE_UL
);
7316 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
7317 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7318 cpu_gpr_d
[r2
], n
, MODE_UU
);
7320 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
7321 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7322 cpu_gpr_d
[r2
], n
, MODE_LL
);
7324 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
7325 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7326 cpu_gpr_d
[r2
], n
, MODE_LU
);
7328 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
7329 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7330 cpu_gpr_d
[r2
], n
, MODE_UL
);
7332 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
7333 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7334 cpu_gpr_d
[r2
], n
, MODE_UU
);
7339 static void decode_rrr1_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
7342 uint32_t r1
, r2
, r3
, r4
, n
;
7344 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7345 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7346 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7347 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7348 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7349 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7352 case OPC2_32_RRR1_MSUB_H_LL
:
7353 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7354 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7356 case OPC2_32_RRR1_MSUB_H_LU
:
7357 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7358 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7360 case OPC2_32_RRR1_MSUB_H_UL
:
7361 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7362 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7364 case OPC2_32_RRR1_MSUB_H_UU
:
7365 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7366 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7368 case OPC2_32_RRR1_MSUBS_H_LL
:
7369 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7370 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7372 case OPC2_32_RRR1_MSUBS_H_LU
:
7373 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7374 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7376 case OPC2_32_RRR1_MSUBS_H_UL
:
7377 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7378 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7380 case OPC2_32_RRR1_MSUBS_H_UU
:
7381 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7382 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7384 case OPC2_32_RRR1_MSUBM_H_LL
:
7385 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7386 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7388 case OPC2_32_RRR1_MSUBM_H_LU
:
7389 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7390 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7392 case OPC2_32_RRR1_MSUBM_H_UL
:
7393 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7394 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7396 case OPC2_32_RRR1_MSUBM_H_UU
:
7397 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7398 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7400 case OPC2_32_RRR1_MSUBMS_H_LL
:
7401 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7402 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7404 case OPC2_32_RRR1_MSUBMS_H_LU
:
7405 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7406 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7408 case OPC2_32_RRR1_MSUBMS_H_UL
:
7409 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7410 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7412 case OPC2_32_RRR1_MSUBMS_H_UU
:
7413 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7414 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7416 case OPC2_32_RRR1_MSUBR_H_LL
:
7417 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7418 cpu_gpr_d
[r2
], n
, MODE_LL
);
7420 case OPC2_32_RRR1_MSUBR_H_LU
:
7421 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7422 cpu_gpr_d
[r2
], n
, MODE_LU
);
7424 case OPC2_32_RRR1_MSUBR_H_UL
:
7425 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7426 cpu_gpr_d
[r2
], n
, MODE_UL
);
7428 case OPC2_32_RRR1_MSUBR_H_UU
:
7429 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7430 cpu_gpr_d
[r2
], n
, MODE_UU
);
7432 case OPC2_32_RRR1_MSUBRS_H_LL
:
7433 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7434 cpu_gpr_d
[r2
], n
, MODE_LL
);
7436 case OPC2_32_RRR1_MSUBRS_H_LU
:
7437 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7438 cpu_gpr_d
[r2
], n
, MODE_LU
);
7440 case OPC2_32_RRR1_MSUBRS_H_UL
:
7441 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7442 cpu_gpr_d
[r2
], n
, MODE_UL
);
7444 case OPC2_32_RRR1_MSUBRS_H_UU
:
7445 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7446 cpu_gpr_d
[r2
], n
, MODE_UU
);
7451 static void decode_rrr1_msubq_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7454 uint32_t r1
, r2
, r3
, r4
, n
;
7457 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7458 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7459 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7460 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7461 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7462 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7464 temp
= tcg_const_i32(n
);
7465 temp2
= tcg_temp_new();
7468 case OPC2_32_RRR1_MSUB_Q_32
:
7469 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7470 cpu_gpr_d
[r2
], n
, 32, env
);
7472 case OPC2_32_RRR1_MSUB_Q_64
:
7473 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7474 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7477 case OPC2_32_RRR1_MSUB_Q_32_L
:
7478 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7479 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7482 case OPC2_32_RRR1_MSUB_Q_64_L
:
7483 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7484 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7485 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7488 case OPC2_32_RRR1_MSUB_Q_32_U
:
7489 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7490 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7493 case OPC2_32_RRR1_MSUB_Q_64_U
:
7494 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7495 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7496 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7499 case OPC2_32_RRR1_MSUB_Q_32_LL
:
7500 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7501 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7502 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7504 case OPC2_32_RRR1_MSUB_Q_64_LL
:
7505 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7506 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7507 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7508 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7510 case OPC2_32_RRR1_MSUB_Q_32_UU
:
7511 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7512 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7513 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7515 case OPC2_32_RRR1_MSUB_Q_64_UU
:
7516 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7517 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7518 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7519 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7521 case OPC2_32_RRR1_MSUBS_Q_32
:
7522 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7523 cpu_gpr_d
[r2
], n
, 32);
7525 case OPC2_32_RRR1_MSUBS_Q_64
:
7526 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7527 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7530 case OPC2_32_RRR1_MSUBS_Q_32_L
:
7531 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7532 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7535 case OPC2_32_RRR1_MSUBS_Q_64_L
:
7536 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7537 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7538 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7541 case OPC2_32_RRR1_MSUBS_Q_32_U
:
7542 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7543 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7546 case OPC2_32_RRR1_MSUBS_Q_64_U
:
7547 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7548 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7549 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7552 case OPC2_32_RRR1_MSUBS_Q_32_LL
:
7553 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7554 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7555 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7557 case OPC2_32_RRR1_MSUBS_Q_64_LL
:
7558 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7559 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7560 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7561 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7563 case OPC2_32_RRR1_MSUBS_Q_32_UU
:
7564 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7565 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7566 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7568 case OPC2_32_RRR1_MSUBS_Q_64_UU
:
7569 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7570 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7571 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7572 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7574 case OPC2_32_RRR1_MSUBR_H_64_UL
:
7575 gen_msubr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7576 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7578 case OPC2_32_RRR1_MSUBRS_H_64_UL
:
7579 gen_msubr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7580 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7582 case OPC2_32_RRR1_MSUBR_Q_32_LL
:
7583 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7584 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7585 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7587 case OPC2_32_RRR1_MSUBR_Q_32_UU
:
7588 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7589 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7590 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7592 case OPC2_32_RRR1_MSUBRS_Q_32_LL
:
7593 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7594 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7595 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7597 case OPC2_32_RRR1_MSUBRS_Q_32_UU
:
7598 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7599 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7600 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7603 tcg_temp_free(temp
);
7604 tcg_temp_free(temp2
);
7607 static void decode_rrr1_msubad_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7610 uint32_t r1
, r2
, r3
, r4
, n
;
7612 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7613 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7614 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7615 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7616 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7617 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7620 case OPC2_32_RRR1_MSUBAD_H_32_LL
:
7621 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7622 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7624 case OPC2_32_RRR1_MSUBAD_H_32_LU
:
7625 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7626 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7628 case OPC2_32_RRR1_MSUBAD_H_32_UL
:
7629 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7630 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7632 case OPC2_32_RRR1_MSUBAD_H_32_UU
:
7633 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7634 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7636 case OPC2_32_RRR1_MSUBADS_H_32_LL
:
7637 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7638 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7641 case OPC2_32_RRR1_MSUBADS_H_32_LU
:
7642 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7643 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7646 case OPC2_32_RRR1_MSUBADS_H_32_UL
:
7647 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7648 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7651 case OPC2_32_RRR1_MSUBADS_H_32_UU
:
7652 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7653 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7656 case OPC2_32_RRR1_MSUBADM_H_64_LL
:
7657 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7658 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7661 case OPC2_32_RRR1_MSUBADM_H_64_LU
:
7662 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7663 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7666 case OPC2_32_RRR1_MSUBADM_H_64_UL
:
7667 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7668 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7671 case OPC2_32_RRR1_MSUBADM_H_64_UU
:
7672 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7673 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7676 case OPC2_32_RRR1_MSUBADMS_H_64_LL
:
7677 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7678 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7681 case OPC2_32_RRR1_MSUBADMS_H_64_LU
:
7682 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7683 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7686 case OPC2_32_RRR1_MSUBADMS_H_64_UL
:
7687 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7688 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7691 case OPC2_32_RRR1_MSUBADMS_H_64_UU
:
7692 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7693 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7696 case OPC2_32_RRR1_MSUBADR_H_16_LL
:
7697 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7698 cpu_gpr_d
[r2
], n
, MODE_LL
);
7700 case OPC2_32_RRR1_MSUBADR_H_16_LU
:
7701 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7702 cpu_gpr_d
[r2
], n
, MODE_LU
);
7704 case OPC2_32_RRR1_MSUBADR_H_16_UL
:
7705 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7706 cpu_gpr_d
[r2
], n
, MODE_UL
);
7708 case OPC2_32_RRR1_MSUBADR_H_16_UU
:
7709 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7710 cpu_gpr_d
[r2
], n
, MODE_UU
);
7712 case OPC2_32_RRR1_MSUBADRS_H_16_LL
:
7713 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7714 cpu_gpr_d
[r2
], n
, MODE_LL
);
7716 case OPC2_32_RRR1_MSUBADRS_H_16_LU
:
7717 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7718 cpu_gpr_d
[r2
], n
, MODE_LU
);
7720 case OPC2_32_RRR1_MSUBADRS_H_16_UL
:
7721 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7722 cpu_gpr_d
[r2
], n
, MODE_UL
);
7724 case OPC2_32_RRR1_MSUBADRS_H_16_UU
:
7725 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7726 cpu_gpr_d
[r2
], n
, MODE_UU
);
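/* RRRR format: register-controlled extract/insert (DEXTR, EXTR, INSERT) */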
static void decode_rrrr_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    TCGv tmp_width, tmp_pos;

    r1 = MASK_OP_RRRR_S1(ctx->opcode);
    r2 = MASK_OP_RRRR_S2(ctx->opcode);
    r3 = MASK_OP_RRRR_S3(ctx->opcode);
    r4 = MASK_OP_RRRR_D(ctx->opcode);
    op2 = MASK_OP_RRRR_OP2(ctx->opcode);

    tmp_pos = tcg_temp_new();
    tmp_width = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RRRR_DEXTR:
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        if (r1 == r2) {
            tcg_gen_rotl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
        } else {
            tcg_gen_shl_tl(tmp_width, cpu_gpr_d[r1], tmp_pos);
            tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
            tcg_gen_shr_tl(tmp_pos, cpu_gpr_d[r2], tmp_pos);
            tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, tmp_pos);
        }
        break;
    case OPC2_32_RRRR_EXTR:
    case OPC2_32_RRRR_EXTR_U:
        tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        tcg_gen_add_tl(tmp_pos, tmp_pos, tmp_width);
        tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
        tcg_gen_subfi_tl(tmp_width, 32, tmp_width);
        if (op2 == OPC2_32_RRRR_EXTR) {
            tcg_gen_sar_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
        } else {
            tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
        }
        break;
    case OPC2_32_RRRR_INSERT:
        tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], tmp_width,
                   tmp_pos);
        break;
    }
    tcg_temp_free(tmp_pos);
    tcg_temp_free(tmp_width);
}
static void decode_rrrw_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    int32_t width;

    TCGv temp, temp2;

    op2 = MASK_OP_RRRW_OP2(ctx->opcode);
    r1 = MASK_OP_RRRW_S1(ctx->opcode);
    r2 = MASK_OP_RRRW_S2(ctx->opcode);
    r3 = MASK_OP_RRRW_S3(ctx->opcode);
    r4 = MASK_OP_RRRW_D(ctx->opcode);
    width = MASK_OP_RRRW_WIDTH(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RRRW_EXTR:
        tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
        tcg_gen_addi_tl(temp, temp, width);
        tcg_gen_subfi_tl(temp, 32, temp);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
        tcg_gen_sari_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width);
        break;
    case OPC2_32_RRRW_EXTR_U:
        if (width == 0) {
            tcg_gen_movi_tl(cpu_gpr_d[r4], 0);
        } else {
            tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
            tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
            tcg_gen_andi_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32-width));
        }
        break;
    case OPC2_32_RRRW_IMASK:
        temp2 = tcg_temp_new();

        tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
        tcg_gen_movi_tl(temp2, (1 << width) - 1);
        tcg_gen_shl_tl(temp2, temp2, temp);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp);
        tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2);

        tcg_temp_free(temp2);
        break;
    case OPC2_32_RRRW_INSERT:
        temp2 = tcg_temp_new();

        tcg_gen_movi_tl(temp, width);
        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f);
        gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2);

        tcg_temp_free(temp2);
        break;
    }
    tcg_temp_free(temp);
}
static void decode_sys_interrupts(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1;
    TCGLabel *l1;
    TCGv tmp;

    op2 = MASK_OP_SYS_OP2(ctx->opcode);
    r1 = MASK_OP_SYS_S1D(ctx->opcode);

    switch (op2) {
    case OPC2_32_SYS_DEBUG:
        /* raise EXCP_DEBUG */
        break;
    case OPC2_32_SYS_DISABLE:
        tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~MASK_ICR_IE);
        break;
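        /*
         * DISABLE clears and ENABLE (below) sets the global interrupt-enable
         * bit in ICR; MASK_ICR_IE selects just that bit, so the rest of ICR
         * is preserved.
         */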
    case OPC2_32_SYS_DSYNC:
        break;
    case OPC2_32_SYS_ENABLE:
        tcg_gen_ori_tl(cpu_ICR, cpu_ICR, MASK_ICR_IE);
        break;
    case OPC2_32_SYS_ISYNC:
        break;
    case OPC2_32_SYS_NOP:
        break;
    case OPC2_32_SYS_RET:
        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
        break;
    case OPC2_32_SYS_FRET:
        gen_fret(ctx);
        break;
    case OPC2_32_SYS_RFE:
        gen_helper_rfe(cpu_env);
        tcg_gen_exit_tb(0);
        ctx->bstate = BS_BRANCH;
        break;
    case OPC2_32_SYS_RFM:
        if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
            tmp = tcg_temp_new();
            l1 = gen_new_label();

            tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR));
            tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
            tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
            gen_helper_rfm(cpu_env);
            gen_set_label(l1);
            tcg_gen_exit_tb(0);
            ctx->bstate = BS_BRANCH;
            tcg_temp_free(tmp);
        } else {
            /* generate privilege trap */
        }
        break;
    case OPC2_32_SYS_RSLCX:
        gen_helper_rslcx(cpu_env);
        break;
    case OPC2_32_SYS_SVLCX:
        gen_helper_svlcx(cpu_env);
        break;
    case OPC2_32_SYS_RESTORE:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM ||
                (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) {
                tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], 8, 1);
            } /* else raise privilege trap */
        } /* else raise illegal opcode trap */
        break;
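        /*
         * RESTORE copies the previously saved interrupt-enable state from
         * bit 0 of D[r1] back into ICR.IE (bit 8 of ICR) and is only legal
         * in supervisor or user-1 mode; the privilege and illegal-opcode
         * traps noted above are not generated yet.
         */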
    case OPC2_32_SYS_TRAPSV:
        /* TODO: raise sticky overflow trap */
        break;
    case OPC2_32_SYS_TRAPV:
        /* TODO: raise overflow trap */
        break;
    }
}
static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
{
    int op1;
    int32_t r1, r2, r3, r4;
    int32_t address, const16;
    int8_t b, const4;
    int32_t bpos;
    TCGv temp, temp2, temp3;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle JNZ.T opcode only being 7 bit long */
    if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
        op1 = OPCM_32_BRN_JTT;
    }
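    /*
     * The JZ.T/JNZ.T pair shares a 7-bit primary opcode; the eighth bit only
     * selects between the two variants and is examined later in
     * gen_compute_branch, so both encodings are folded onto the single
     * OPCM_32_BRN_JTT case of the switch below.
     */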
    switch (op1) {
/* ABS-format */
    case OPCM_32_ABS_LDW:
        decode_abs_ldw(env, ctx);
        break;
    case OPCM_32_ABS_LDB:
        decode_abs_ldb(env, ctx);
        break;
    case OPCM_32_ABS_LDMST_SWAP:
        decode_abs_ldst_swap(env, ctx);
        break;
    case OPCM_32_ABS_LDST_CONTEXT:
        decode_abs_ldst_context(env, ctx);
        break;
    case OPCM_32_ABS_STORE:
        decode_abs_store(env, ctx);
        break;
    case OPCM_32_ABS_STOREB_H:
        decode_abs_storeb_h(env, ctx);
        break;
    case OPC1_32_ABS_STOREQ:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);

        tcg_temp_free(temp2);
        tcg_temp_free(temp);
        break;
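        /*
         * ST.Q stores the most-significant half-word of D[a]: the value is
         * shifted right by 16 and written with a 16-bit little-endian store
         * to the absolute address.
         */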
    case OPC1_32_ABS_LD_Q:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));

        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);

        tcg_temp_free(temp);
        break;
    case OPC1_32_ABS_LEA:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
        break;
/* ABSB-format */
    case OPC1_32_ABSB_ST_T:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        b = MASK_OP_ABSB_B(ctx->opcode);
        bpos = MASK_OP_ABSB_BPOS(ctx->opcode);

        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
        tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
        tcg_gen_ori_tl(temp2, temp2, (b << bpos));
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
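        /*
         * ST.T is emulated as a read-modify-write of one byte: load the
         * byte, clear bit 'bpos', OR in the immediate bit value 'b' and
         * store the byte back.  The separate load and store are not atomic.
         */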
/* B-format */
    case OPC1_32_B_CALL:
    case OPC1_32_B_CALLA:
    case OPC1_32_B_FCALL:
    case OPC1_32_B_FCALLA:
    case OPC1_32_B_J:
    case OPC1_32_B_JA:
    case OPC1_32_B_JL:
    case OPC1_32_B_JLA:
        address = MASK_OP_B_DISP24_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, 0, address);
        break;
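        /*
         * All B-format branches share one handler; gen_compute_branch tells
         * them apart via op1 (CALL saves an upper context to the CSA list,
         * FCALL only pushes the return address), so only the sign-extended
         * 24-bit displacement needs to be decoded here.
         */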
/* BIT-format */
    case OPCM_32_BIT_ANDACC:
        decode_bit_andacc(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T1:
        decode_bit_logical_t(env, ctx);
        break;
    case OPCM_32_BIT_INSERT:
        decode_bit_insert(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T2:
        decode_bit_logical_t2(env, ctx);
        break;
    case OPCM_32_BIT_ORAND:
        decode_bit_orand(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC1:
        decode_bit_sh_logic1(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC2:
        decode_bit_sh_logic2(env, ctx);
        break;
/* BO-format */
    case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
        decode_bo_addrmode_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
        decode_bo_addrmode_ld_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ld_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
        decode_bo_addrmode_stctx_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx);
        break;
/* BOL-format */
    case OPC1_32_BOL_LD_A_LONGOFF:
    case OPC1_32_BOL_LD_W_LONGOFF:
    case OPC1_32_BOL_LEA_LONGOFF:
    case OPC1_32_BOL_ST_W_LONGOFF:
    case OPC1_32_BOL_ST_A_LONGOFF:
    case OPC1_32_BOL_LD_B_LONGOFF:
    case OPC1_32_BOL_LD_BU_LONGOFF:
    case OPC1_32_BOL_LD_H_LONGOFF:
    case OPC1_32_BOL_LD_HU_LONGOFF:
    case OPC1_32_BOL_ST_B_LONGOFF:
    case OPC1_32_BOL_ST_H_LONGOFF:
        decode_bol_opc(env, ctx, op1);
        break;
/* BRC Format */
    case OPCM_32_BRC_EQ_NEQ:
    case OPCM_32_BRC_GE:
    case OPCM_32_BRC_JLT:
    case OPCM_32_BRC_JNE:
        const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
        address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRC_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, const4, address);
        break;
/* BRN Format */
    case OPCM_32_BRN_JTT:
        address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRN_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, address);
        break;
/* BRR Format */
    case OPCM_32_BRR_EQ_NEQ:
    case OPCM_32_BRR_ADDR_EQ_NEQ:
    case OPCM_32_BRR_GE:
    case OPCM_32_BRR_JLT:
    case OPCM_32_BRR_JNE:
    case OPCM_32_BRR_JNZ:
    case OPCM_32_BRR_LOOP:
        address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
        r2 = MASK_OP_BRR_S2(ctx->opcode);
        r1 = MASK_OP_BRR_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, r2, 0, address);
        break;
/* RC Format */
    case OPCM_32_RC_LOGICAL_SHIFT:
        decode_rc_logical_shift(env, ctx);
        break;
    case OPCM_32_RC_ACCUMULATOR:
        decode_rc_accumulator(env, ctx);
        break;
    case OPCM_32_RC_SERVICEROUTINE:
        decode_rc_serviceroutine(env, ctx);
        break;
    case OPCM_32_RC_MUL:
        decode_rc_mul(env, ctx);
        break;
/* RCPW Format */
    case OPCM_32_RCPW_MASK_INSERT:
        decode_rcpw_insert(env, ctx);
        break;
/* RCRR Format */
    case OPC1_32_RCRR_INSERT:
        r1 = MASK_OP_RCRR_S1(ctx->opcode);
        r2 = MASK_OP_RCRR_S3(ctx->opcode);
        r3 = MASK_OP_RCRR_D(ctx->opcode);
        const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
        temp = tcg_const_i32(const16);
        temp2 = tcg_temp_new(); /* width */
        temp3 = tcg_temp_new(); /* pos */

        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);

        gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        tcg_temp_free(temp3);
        break;
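        /*
         * RCRR INSERT places the 4-bit constant into a bit field of D[r1];
         * unlike the RCPW form, width and position are taken at run time
         * from the register pair E[r3] (even register = position,
         * odd register = width).
         */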
/* RCRW Format */
    case OPCM_32_RCRW_MASK_INSERT:
        decode_rcrw_insert(env, ctx);
        break;
/* RCR Format */
    case OPCM_32_RCR_COND_SELECT:
        decode_rcr_cond_select(env, ctx);
        break;
    case OPCM_32_RCR_MADD:
        decode_rcr_madd(env, ctx);
        break;
    case OPCM_32_RCR_MSUB:
        decode_rcr_msub(env, ctx);
        break;
/* RLC Format */
    case OPC1_32_RLC_ADDI:
    case OPC1_32_RLC_ADDIH:
    case OPC1_32_RLC_ADDIH_A:
    case OPC1_32_RLC_MFCR:
    case OPC1_32_RLC_MOV:
    case OPC1_32_RLC_MOV_64:
    case OPC1_32_RLC_MOV_U:
    case OPC1_32_RLC_MOV_H:
    case OPC1_32_RLC_MOVH_A:
    case OPC1_32_RLC_MTCR:
        decode_rlc_opc(env, ctx, op1);
        break;
/* RR Format */
    case OPCM_32_RR_ACCUMULATOR:
        decode_rr_accumulator(env, ctx);
        break;
    case OPCM_32_RR_LOGICAL_SHIFT:
        decode_rr_logical_shift(env, ctx);
        break;
    case OPCM_32_RR_ADDRESS:
        decode_rr_address(env, ctx);
        break;
    case OPCM_32_RR_IDIRECT:
        decode_rr_idirect(env, ctx);
        break;
    case OPCM_32_RR_DIVIDE:
        decode_rr_divide(env, ctx);
        break;
/* RR1 Format */
    case OPCM_32_RR1_MUL:
        decode_rr1_mul(env, ctx);
        break;
    case OPCM_32_RR1_MULQ:
        decode_rr1_mulq(env, ctx);
        break;
/* RR2 Format */
    case OPCM_32_RR2_MUL:
        decode_rr2_mul(env, ctx);
        break;
/* RRPW Format */
    case OPCM_32_RRPW_EXTRACT_INSERT:
        decode_rrpw_extract_insert(env, ctx);
        break;
    case OPC1_32_RRPW_DEXTR:
        r1 = MASK_OP_RRPW_S1(ctx->opcode);
        r2 = MASK_OP_RRPW_S2(ctx->opcode);
        r3 = MASK_OP_RRPW_D(ctx->opcode);
        const16 = MASK_OP_RRPW_POS(ctx->opcode);
        if (r1 == r2) {
            tcg_gen_rotli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], const16);
        } else {
            temp = tcg_temp_new();
            tcg_gen_shli_tl(temp, cpu_gpr_d[r1], const16);
            tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], 32 - const16);
            tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
            tcg_temp_free(temp);
        }
        break;
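        /*
         * DEXTR with an immediate position: concatenate {D[r1], D[r2]},
         * shift left by const16 and keep the upper word.  As in the
         * register form above, identical source registers reduce to a
         * rotate left by const16.
         */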
/* RRR Format */
    case OPCM_32_RRR_COND_SELECT:
        decode_rrr_cond_select(env, ctx);
        break;
    case OPCM_32_RRR_DIVIDE:
        decode_rrr_divide(env, ctx);
        break;
/* RRR2 Format */
    case OPCM_32_RRR2_MADD:
        decode_rrr2_madd(env, ctx);
        break;
    case OPCM_32_RRR2_MSUB:
        decode_rrr2_msub(env, ctx);
        break;
/* RRR1 Format */
    case OPCM_32_RRR1_MADD:
        decode_rrr1_madd(env, ctx);
        break;
    case OPCM_32_RRR1_MADDQ_H:
        decode_rrr1_maddq_h(env, ctx);
        break;
    case OPCM_32_RRR1_MADDSU_H:
        decode_rrr1_maddsu_h(env, ctx);
        break;
    case OPCM_32_RRR1_MSUB_H:
        decode_rrr1_msub(env, ctx);
        break;
    case OPCM_32_RRR1_MSUB_Q:
        decode_rrr1_msubq_h(env, ctx);
        break;
    case OPCM_32_RRR1_MSUBAD_H:
        decode_rrr1_msubad_h(env, ctx);
        break;
/* RRRR Format */
    case OPCM_32_RRRR_EXTRACT_INSERT:
        decode_rrrr_extract_insert(env, ctx);
        break;
/* RRRW Format */
    case OPCM_32_RRRW_EXTRACT_INSERT:
        decode_rrrw_extract_insert(env, ctx);
        break;
/* SYS Format */
    case OPCM_32_SYS_INTERRUPTS:
        decode_sys_interrupts(env, ctx);
        break;
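    /*
     * RSTV clears all four overflow-related PSW bits.  V, SV, AV and SAV
     * live in separate TCG globals (the PSW flag cache initialised in
     * tricore_tcg_init below), so this is four plain moves rather than a
     * PSW read-modify-write.
     */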
    case OPC1_32_SYS_RSTV:
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
        break;
    }
}
static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
{
    /* 16-Bit Instruction */
    if ((ctx->opcode & 0x1) == 0) {
        ctx->next_pc = ctx->pc + 2;
        decode_16Bit_opc(env, ctx);
    /* 32-Bit Instruction */
    } else {
        ctx->next_pc = ctx->pc + 4;
        decode_32Bit_opc(env, ctx);
    }
}
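/*
 * Translate one block of guest code: instructions are decoded one at a time
 * until a decoder marks the end of the block (ctx.bstate leaves BS_NONE),
 * the per-TB instruction budget is exhausted, or the TCG op buffer runs
 * full.
 */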
void gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    TriCoreCPU *cpu = tricore_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns, max_insns;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (singlestep) {
        max_insns = 1;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.saved_pc = -1;
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.bstate = BS_NONE;
    ctx.mem_idx = cpu_mmu_index(env, false);

    tcg_clear_temp_count();
    gen_tb_start(tb);
    while (ctx.bstate == BS_NONE) {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx, 0);

        if (num_insns >= max_insns || tcg_op_buf_full()) {
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        ctx.pc = ctx.next_pc;
    }

    gen_tb_end(tb, num_insns);
    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

    if (tcg_check_temp_count()) {
        printf("LEAK at %08x\n", env->PC);
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
void restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->PC = data[0];
}
void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
    env->PSW = 0xb80;
}
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(cpu_env,
                          offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(cpu_env,
                          offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(cpu_env,
                          offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(cpu_env,
                          offsetof(CPUTriCoreState, ICR), "ICR");
}
void tricore_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    /* reg init */
    for (i = 0 ; i < 16 ; i++) {
        cpu_gpr_a[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUTriCoreState, gpr_a[i]),
                                          regnames_a[i]);
    }
    for (i = 0 ; i < 16 ; i++) {
        cpu_gpr_d[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUTriCoreState, gpr_d[i]),
                                          regnames_d[i]);
    }
    tricore_tcg_init_csfr();
    /* init PSW flag cache */
    cpu_PSW_C = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUTriCoreState, PSW_USB_C),
                                   "PSW_C");
    cpu_PSW_V = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUTriCoreState, PSW_USB_V),
                                   "PSW_V");
    cpu_PSW_SV = tcg_global_mem_new(cpu_env,
                                    offsetof(CPUTriCoreState, PSW_USB_SV),
                                    "PSW_SV");
    cpu_PSW_AV = tcg_global_mem_new(cpu_env,
                                    offsetof(CPUTriCoreState, PSW_USB_AV),
                                    "PSW_AV");
    cpu_PSW_SAV = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUTriCoreState, PSW_USB_SAV),