2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
39 static TCGv cpu_gpr_a
[16];
40 static TCGv cpu_gpr_d
[16];
42 static TCGv cpu_PSW_C
;
43 static TCGv cpu_PSW_V
;
44 static TCGv cpu_PSW_SV
;
45 static TCGv cpu_PSW_AV
;
46 static TCGv cpu_PSW_SAV
;
48 static TCGv_ptr cpu_env
;
50 #include "exec/gen-icount.h"
52 static const char *regnames_a
[] = {
53 "a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
54 "a6" , "a7" , "a8" , "a9" , "sp" , "a11" ,
55 "a12" , "a13" , "a14" , "a15",
58 static const char *regnames_d
[] = {
59 "d0" , "d1" , "d2" , "d3" , "d4" , "d5" ,
60 "d6" , "d7" , "d8" , "d9" , "d10" , "d11" ,
61 "d12" , "d13" , "d14" , "d15",
64 typedef struct DisasContext
{
65 struct TranslationBlock
*tb
;
66 target_ulong pc
, saved_pc
, next_pc
;
68 int singlestep_enabled
;
69 /* Routine used to access memory */
71 uint32_t hflags
, saved_hflags
;
90 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
91 fprintf_function cpu_fprintf
, int flags
)
93 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
94 CPUTriCoreState
*env
= &cpu
->env
;
100 cpu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
101 cpu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
102 cpu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
103 cpu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
104 cpu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
105 cpu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
107 for (i
= 0; i
< 16; ++i
) {
109 cpu_fprintf(f
, "\nGPR A%02d:", i
);
111 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
113 for (i
= 0; i
< 16; ++i
) {
115 cpu_fprintf(f
, "\nGPR D%02d:", i
);
117 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
119 cpu_fprintf(f
, "\n");
123 * Functions to generate micro-ops
126 /* Makros for generating helpers */
128 #define gen_helper_1arg(name, arg) do { \
129 TCGv_i32 helper_tmp = tcg_const_i32(arg); \
130 gen_helper_##name(cpu_env, helper_tmp); \
131 tcg_temp_free_i32(helper_tmp); \
134 #define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \
135 TCGv arg00 = tcg_temp_new(); \
136 TCGv arg01 = tcg_temp_new(); \
137 TCGv arg11 = tcg_temp_new(); \
138 tcg_gen_sari_tl(arg00, arg0, 16); \
139 tcg_gen_ext16s_tl(arg01, arg0); \
140 tcg_gen_ext16s_tl(arg11, arg1); \
141 gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
142 tcg_temp_free(arg00); \
143 tcg_temp_free(arg01); \
144 tcg_temp_free(arg11); \
147 #define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \
148 TCGv arg00 = tcg_temp_new(); \
149 TCGv arg01 = tcg_temp_new(); \
150 TCGv arg10 = tcg_temp_new(); \
151 TCGv arg11 = tcg_temp_new(); \
152 tcg_gen_sari_tl(arg00, arg0, 16); \
153 tcg_gen_ext16s_tl(arg01, arg0); \
154 tcg_gen_sari_tl(arg11, arg1, 16); \
155 tcg_gen_ext16s_tl(arg10, arg1); \
156 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
157 tcg_temp_free(arg00); \
158 tcg_temp_free(arg01); \
159 tcg_temp_free(arg10); \
160 tcg_temp_free(arg11); \
163 #define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \
164 TCGv arg00 = tcg_temp_new(); \
165 TCGv arg01 = tcg_temp_new(); \
166 TCGv arg10 = tcg_temp_new(); \
167 TCGv arg11 = tcg_temp_new(); \
168 tcg_gen_sari_tl(arg00, arg0, 16); \
169 tcg_gen_ext16s_tl(arg01, arg0); \
170 tcg_gen_sari_tl(arg10, arg1, 16); \
171 tcg_gen_ext16s_tl(arg11, arg1); \
172 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
173 tcg_temp_free(arg00); \
174 tcg_temp_free(arg01); \
175 tcg_temp_free(arg10); \
176 tcg_temp_free(arg11); \
179 #define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \
180 TCGv arg00 = tcg_temp_new(); \
181 TCGv arg01 = tcg_temp_new(); \
182 TCGv arg11 = tcg_temp_new(); \
183 tcg_gen_sari_tl(arg01, arg0, 16); \
184 tcg_gen_ext16s_tl(arg00, arg0); \
185 tcg_gen_sari_tl(arg11, arg1, 16); \
186 gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
187 tcg_temp_free(arg00); \
188 tcg_temp_free(arg01); \
189 tcg_temp_free(arg11); \
192 #define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do { \
193 TCGv_i64 ret = tcg_temp_new_i64(); \
194 TCGv_i64 arg1 = tcg_temp_new_i64(); \
196 tcg_gen_concat_i32_i64(arg1, al1, ah1); \
197 gen_helper_##name(ret, arg1, arg2); \
198 tcg_gen_extr_i64_i32(rl, rh, ret); \
200 tcg_temp_free_i64(ret); \
201 tcg_temp_free_i64(arg1); \
204 #define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do { \
205 TCGv_i64 ret = tcg_temp_new_i64(); \
207 gen_helper_##name(ret, cpu_env, arg1, arg2); \
208 tcg_gen_extr_i64_i32(rl, rh, ret); \
210 tcg_temp_free_i64(ret); \
213 #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
214 #define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
215 ((offset & 0x0fffff) << 1))
217 /* Functions for load/save to/from memory */
219 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
220 int16_t con
, TCGMemOp mop
)
222 TCGv temp
= tcg_temp_new();
223 tcg_gen_addi_tl(temp
, r2
, con
);
224 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
228 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
229 int16_t con
, TCGMemOp mop
)
231 TCGv temp
= tcg_temp_new();
232 tcg_gen_addi_tl(temp
, r2
, con
);
233 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
/* Store a 64 bit value built from the register pair rh:rl (rh = bits 63:32,
   rl = bits 31:0) to memory at address, as one little-endian 64 bit access. */
static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    /* concatenate the two 32 bit regs into one 64 bit temp */
    tcg_gen_concat_i32_i64(temp, rl, rh);
    tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);

    tcg_temp_free_i64(temp);
}
247 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
250 TCGv temp
= tcg_temp_new();
251 tcg_gen_addi_tl(temp
, base
, con
);
252 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
/* Load a little-endian 64 bit value from memory at address and split it
   into the register pair rh:rl (rh = bits 63:32, rl = bits 31:0). */
static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
    /* write back to two 32 bit regs */
    tcg_gen_extr_i64_i32(rl, rh, temp);

    tcg_temp_free_i64(temp);
}
267 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
270 TCGv temp
= tcg_temp_new();
271 tcg_gen_addi_tl(temp
, base
, con
);
272 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
276 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
279 TCGv temp
= tcg_temp_new();
280 tcg_gen_addi_tl(temp
, r2
, off
);
281 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
282 tcg_gen_mov_tl(r2
, temp
);
286 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
289 TCGv temp
= tcg_temp_new();
290 tcg_gen_addi_tl(temp
, r2
, off
);
291 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
292 tcg_gen_mov_tl(r2
, temp
);
296 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
297 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
299 TCGv temp
= tcg_temp_new();
300 TCGv temp2
= tcg_temp_new();
302 /* temp = (M(EA, word) */
303 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
304 /* temp = temp & ~E[a][63:32]) */
305 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
306 /* temp2 = (E[a][31:0] & E[a][63:32]); */
307 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
308 /* temp = temp | temp2; */
309 tcg_gen_or_tl(temp
, temp
, temp2
);
310 /* M(EA, word) = temp; */
311 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
314 tcg_temp_free(temp2
);
317 /* tmp = M(EA, word);
320 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
322 TCGv temp
= tcg_temp_new();
324 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
325 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
326 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
331 static void gen_cmpswap(DisasContext
*ctx
, int reg
, TCGv ea
)
333 TCGv temp
= tcg_temp_new();
334 TCGv temp2
= tcg_temp_new();
335 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
336 tcg_gen_movcond_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[reg
+1], temp
,
337 cpu_gpr_d
[reg
], temp
);
338 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
339 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
342 tcg_temp_free(temp2
);
345 static void gen_swapmsk(DisasContext
*ctx
, int reg
, TCGv ea
)
347 TCGv temp
= tcg_temp_new();
348 TCGv temp2
= tcg_temp_new();
349 TCGv temp3
= tcg_temp_new();
351 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
352 tcg_gen_and_tl(temp2
, cpu_gpr_d
[reg
], cpu_gpr_d
[reg
+1]);
353 tcg_gen_andc_tl(temp3
, temp
, cpu_gpr_d
[reg
+1]);
354 tcg_gen_or_tl(temp2
, temp2
, temp3
);
355 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
356 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
359 tcg_temp_free(temp2
);
360 tcg_temp_free(temp3
);
364 /* We generate loads and store to core special function register (csfr) through
365 the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
366 makros R, A and E, which allow read-only, all and endinit protected access.
367 These makros also specify in which ISA version the csfr was introduced. */
368 #define R(ADDRESS, REG, FEATURE) \
370 if (tricore_feature(env, FEATURE)) { \
371 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
374 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
375 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
376 static inline void gen_mfcr(CPUTriCoreState
*env
, TCGv ret
, int32_t offset
)
378 /* since we're caching PSW make this a special case */
379 if (offset
== 0xfe04) {
380 gen_helper_psw_read(ret
, cpu_env
);
391 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
392 since no execption occurs */
393 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
395 if (tricore_feature(env, FEATURE)) { \
396 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
399 /* Endinit protected registers
400 TODO: Since the endinit bit is in a register of a not yet implemented
401 watchdog device, we handle endinit protected registers like
402 all-access registers for now. */
403 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
404 static inline void gen_mtcr(CPUTriCoreState
*env
, DisasContext
*ctx
, TCGv r1
,
407 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
408 /* since we're caching PSW make this a special case */
409 if (offset
== 0xfe04) {
410 gen_helper_psw_write(cpu_env
, r1
);
417 /* generate privilege trap */
421 /* Functions for arithmetic instructions */
423 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
425 TCGv t0
= tcg_temp_new_i32();
426 TCGv result
= tcg_temp_new_i32();
427 /* Addition and set V/SV bits */
428 tcg_gen_add_tl(result
, r1
, r2
);
430 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
431 tcg_gen_xor_tl(t0
, r1
, r2
);
432 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
434 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
435 /* Calc AV/SAV bits */
436 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
437 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
439 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
440 /* write back result */
441 tcg_gen_mov_tl(ret
, result
);
443 tcg_temp_free(result
);
448 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
450 TCGv temp
= tcg_temp_new();
451 TCGv_i64 t0
= tcg_temp_new_i64();
452 TCGv_i64 t1
= tcg_temp_new_i64();
453 TCGv_i64 result
= tcg_temp_new_i64();
455 tcg_gen_add_i64(result
, r1
, r2
);
457 tcg_gen_xor_i64(t1
, result
, r1
);
458 tcg_gen_xor_i64(t0
, r1
, r2
);
459 tcg_gen_andc_i64(t1
, t1
, t0
);
460 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t1
, 32);
462 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
463 /* calc AV/SAV bits */
464 tcg_gen_trunc_shr_i64_i32(temp
, result
, 32);
465 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
466 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
468 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
469 /* write back result */
470 tcg_gen_mov_i64(ret
, result
);
473 tcg_temp_free_i64(result
);
474 tcg_temp_free_i64(t0
);
475 tcg_temp_free_i64(t1
);
479 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
480 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
481 void(*op2
)(TCGv
, TCGv
, TCGv
))
483 TCGv temp
= tcg_temp_new();
484 TCGv temp2
= tcg_temp_new();
485 TCGv temp3
= tcg_temp_new();
486 TCGv temp4
= tcg_temp_new();
488 (*op1
)(temp
, r1_low
, r2
);
490 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
491 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
492 if (op1
== tcg_gen_add_tl
) {
493 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
495 tcg_gen_and_tl(temp2
, temp2
, temp3
);
498 (*op2
)(temp3
, r1_high
, r3
);
500 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
501 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
502 if (op2
== tcg_gen_add_tl
) {
503 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
505 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
507 /* combine V0/V1 bits */
508 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
510 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
512 tcg_gen_mov_tl(ret_low
, temp
);
513 tcg_gen_mov_tl(ret_high
, temp3
);
515 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
516 tcg_gen_xor_tl(temp
, temp
, ret_low
);
517 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
518 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
519 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
521 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
524 tcg_temp_free(temp2
);
525 tcg_temp_free(temp3
);
526 tcg_temp_free(temp4
);
529 /* ret = r2 + (r1 * r3); */
/* ret = r2 + (r1 * r3); multiply-add with a 32 bit result.
   The arithmetic is done in 64 bit so that overflow of the 32 bit result
   can be detected exactly; PSW V/SV and AV/SAV are updated. */
static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    /* sign-extend all three operands to 64 bit */
    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t1, t2, t1);

    /* low 32 bits are the architectural result */
    tcg_gen_trunc_i64_i32(ret, t1);
    /* calc V bit: result overflows 32 bit iff t1 > 0x7fffffff ... */
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* t1 < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
    /* V flag is kept in bit 31 */
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* SV is sticky */
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
565 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
567 TCGv temp
= tcg_const_i32(con
);
568 gen_madd32_d(ret
, r1
, r2
, temp
);
573 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
576 TCGv t1
= tcg_temp_new();
577 TCGv t2
= tcg_temp_new();
578 TCGv t3
= tcg_temp_new();
579 TCGv t4
= tcg_temp_new();
581 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
582 /* only the add can overflow */
583 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
585 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
586 tcg_gen_xor_tl(t1
, r2_high
, t2
);
587 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
589 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
590 /* Calc AV/SAV bits */
591 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
592 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
594 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
595 /* write back the result */
596 tcg_gen_mov_tl(ret_low
, t3
);
597 tcg_gen_mov_tl(ret_high
, t4
);
606 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
609 TCGv_i64 t1
= tcg_temp_new_i64();
610 TCGv_i64 t2
= tcg_temp_new_i64();
611 TCGv_i64 t3
= tcg_temp_new_i64();
613 tcg_gen_extu_i32_i64(t1
, r1
);
614 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
615 tcg_gen_extu_i32_i64(t3
, r3
);
617 tcg_gen_mul_i64(t1
, t1
, t3
);
618 tcg_gen_add_i64(t2
, t2
, t1
);
619 /* write back result */
620 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
621 /* only the add overflows, if t2 < t1
623 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
624 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
625 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
627 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
628 /* Calc AV/SAV bits */
629 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
630 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
632 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
634 tcg_temp_free_i64(t1
);
635 tcg_temp_free_i64(t2
);
636 tcg_temp_free_i64(t3
);
640 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
643 TCGv temp
= tcg_const_i32(con
);
644 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
649 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
652 TCGv temp
= tcg_const_i32(con
);
653 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
658 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
659 TCGv r3
, uint32_t n
, uint32_t mode
)
661 TCGv temp
= tcg_const_i32(n
);
662 TCGv temp2
= tcg_temp_new();
663 TCGv_i64 temp64
= tcg_temp_new_i64();
666 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
669 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
672 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
675 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
678 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
679 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
680 tcg_gen_add_tl
, tcg_gen_add_tl
);
682 tcg_temp_free(temp2
);
683 tcg_temp_free_i64(temp64
);
687 gen_maddsu_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
688 TCGv r3
, uint32_t n
, uint32_t mode
)
690 TCGv temp
= tcg_const_i32(n
);
691 TCGv temp2
= tcg_temp_new();
692 TCGv_i64 temp64
= tcg_temp_new_i64();
695 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
698 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
701 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
704 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
707 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
708 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
709 tcg_gen_sub_tl
, tcg_gen_add_tl
);
711 tcg_temp_free(temp2
);
712 tcg_temp_free_i64(temp64
);
716 gen_maddsum_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
717 TCGv r3
, uint32_t n
, uint32_t mode
)
719 TCGv temp
= tcg_const_i32(n
);
720 TCGv_i64 temp64
= tcg_temp_new_i64();
721 TCGv_i64 temp64_2
= tcg_temp_new_i64();
722 TCGv_i64 temp64_3
= tcg_temp_new_i64();
725 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
728 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
731 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
734 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
737 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
738 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
739 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
740 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
741 tcg_gen_shli_i64(temp64
, temp64
, 16);
743 gen_add64_d(temp64_2
, temp64_3
, temp64
);
744 /* write back result */
745 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
748 tcg_temp_free_i64(temp64
);
749 tcg_temp_free_i64(temp64_2
);
750 tcg_temp_free_i64(temp64_3
);
753 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
);
756 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
757 TCGv r3
, uint32_t n
, uint32_t mode
)
759 TCGv temp
= tcg_const_i32(n
);
760 TCGv temp2
= tcg_temp_new();
761 TCGv temp3
= tcg_temp_new();
762 TCGv_i64 temp64
= tcg_temp_new_i64();
766 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
769 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
772 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
775 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
778 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
779 gen_adds(ret_low
, r1_low
, temp
);
780 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
781 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
782 gen_adds(ret_high
, r1_high
, temp2
);
784 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
785 /* combine av bits */
786 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
789 tcg_temp_free(temp2
);
790 tcg_temp_free(temp3
);
791 tcg_temp_free_i64(temp64
);
795 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
);
798 gen_maddsus_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
799 TCGv r3
, uint32_t n
, uint32_t mode
)
801 TCGv temp
= tcg_const_i32(n
);
802 TCGv temp2
= tcg_temp_new();
803 TCGv temp3
= tcg_temp_new();
804 TCGv_i64 temp64
= tcg_temp_new_i64();
808 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
811 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
814 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
817 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
820 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
821 gen_subs(ret_low
, r1_low
, temp
);
822 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
823 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
824 gen_adds(ret_high
, r1_high
, temp2
);
826 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
827 /* combine av bits */
828 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
831 tcg_temp_free(temp2
);
832 tcg_temp_free(temp3
);
833 tcg_temp_free_i64(temp64
);
838 gen_maddsums_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
839 TCGv r3
, uint32_t n
, uint32_t mode
)
841 TCGv temp
= tcg_const_i32(n
);
842 TCGv_i64 temp64
= tcg_temp_new_i64();
843 TCGv_i64 temp64_2
= tcg_temp_new_i64();
847 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
850 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
853 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
856 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
859 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
860 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
861 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
862 tcg_gen_shli_i64(temp64
, temp64
, 16);
863 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
865 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
866 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
869 tcg_temp_free_i64(temp64
);
870 tcg_temp_free_i64(temp64_2
);
875 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
876 TCGv r3
, uint32_t n
, uint32_t mode
)
878 TCGv temp
= tcg_const_i32(n
);
879 TCGv_i64 temp64
= tcg_temp_new_i64();
880 TCGv_i64 temp64_2
= tcg_temp_new_i64();
881 TCGv_i64 temp64_3
= tcg_temp_new_i64();
884 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
887 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
890 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
893 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
896 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
897 gen_add64_d(temp64_3
, temp64_2
, temp64
);
898 /* write back result */
899 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
902 tcg_temp_free_i64(temp64
);
903 tcg_temp_free_i64(temp64_2
);
904 tcg_temp_free_i64(temp64_3
);
908 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
909 TCGv r3
, uint32_t n
, uint32_t mode
)
911 TCGv temp
= tcg_const_i32(n
);
912 TCGv_i64 temp64
= tcg_temp_new_i64();
913 TCGv_i64 temp64_2
= tcg_temp_new_i64();
916 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
919 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
922 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
925 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
928 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
929 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
930 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
933 tcg_temp_free_i64(temp64
);
934 tcg_temp_free_i64(temp64_2
);
938 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
941 TCGv temp
= tcg_const_i32(n
);
942 TCGv_i64 temp64
= tcg_temp_new_i64();
945 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
948 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
951 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
954 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
957 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
960 tcg_temp_free_i64(temp64
);
964 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
966 TCGv temp
= tcg_temp_new();
967 TCGv temp2
= tcg_temp_new();
969 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
970 tcg_gen_shli_tl(temp
, r1
, 16);
971 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
974 tcg_temp_free(temp2
);
978 gen_maddsur32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
980 TCGv temp
= tcg_const_i32(n
);
981 TCGv temp2
= tcg_temp_new();
982 TCGv_i64 temp64
= tcg_temp_new_i64();
985 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
988 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
991 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
994 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
997 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
998 tcg_gen_shli_tl(temp
, r1
, 16);
999 gen_helper_addsur_h(ret
, cpu_env
, temp64
, temp
, temp2
);
1001 tcg_temp_free(temp
);
1002 tcg_temp_free(temp2
);
1003 tcg_temp_free_i64(temp64
);
1008 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1009 uint32_t n
, uint32_t mode
)
1011 TCGv temp
= tcg_const_i32(n
);
1012 TCGv_i64 temp64
= tcg_temp_new_i64();
1015 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1018 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1021 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1024 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1027 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1029 tcg_temp_free(temp
);
1030 tcg_temp_free_i64(temp64
);
1034 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1036 TCGv temp
= tcg_temp_new();
1037 TCGv temp2
= tcg_temp_new();
1039 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1040 tcg_gen_shli_tl(temp
, r1
, 16);
1041 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1043 tcg_temp_free(temp
);
1044 tcg_temp_free(temp2
);
1048 gen_maddsur32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1050 TCGv temp
= tcg_const_i32(n
);
1051 TCGv temp2
= tcg_temp_new();
1052 TCGv_i64 temp64
= tcg_temp_new_i64();
1055 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1058 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1061 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1064 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1067 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1068 tcg_gen_shli_tl(temp
, r1
, 16);
1069 gen_helper_addsur_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
1071 tcg_temp_free(temp
);
1072 tcg_temp_free(temp2
);
1073 tcg_temp_free_i64(temp64
);
1077 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1079 TCGv temp
= tcg_const_i32(n
);
1080 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1081 tcg_temp_free(temp
);
1085 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1087 TCGv temp
= tcg_const_i32(n
);
1088 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1089 tcg_temp_free(temp
);
1093 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1094 uint32_t up_shift
, CPUTriCoreState
*env
)
1096 TCGv temp
= tcg_temp_new();
1097 TCGv temp2
= tcg_temp_new();
1098 TCGv temp3
= tcg_temp_new();
1099 TCGv_i64 t1
= tcg_temp_new_i64();
1100 TCGv_i64 t2
= tcg_temp_new_i64();
1101 TCGv_i64 t3
= tcg_temp_new_i64();
1103 tcg_gen_ext_i32_i64(t2
, arg2
);
1104 tcg_gen_ext_i32_i64(t3
, arg3
);
1106 tcg_gen_mul_i64(t2
, t2
, t3
);
1107 tcg_gen_shli_i64(t2
, t2
, n
);
1109 tcg_gen_ext_i32_i64(t1
, arg1
);
1110 tcg_gen_sari_i64(t2
, t2
, up_shift
);
1112 tcg_gen_add_i64(t3
, t1
, t2
);
1113 tcg_gen_trunc_i64_i32(temp3
, t3
);
1115 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1116 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1117 tcg_gen_or_i64(t1
, t1
, t2
);
1118 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
1119 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1120 /* We produce an overflow on the host if the mul before was
1121 (0x80000000 * 0x80000000) << 1). If this is the
1122 case, we negate the ovf. */
1124 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1125 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1126 tcg_gen_and_tl(temp
, temp
, temp2
);
1127 tcg_gen_shli_tl(temp
, temp
, 31);
1128 /* negate v bit, if special condition */
1129 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1132 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1133 /* Calc AV/SAV bits */
1134 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1135 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1137 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1138 /* write back result */
1139 tcg_gen_mov_tl(ret
, temp3
);
1141 tcg_temp_free(temp
);
1142 tcg_temp_free(temp2
);
1143 tcg_temp_free(temp3
);
1144 tcg_temp_free_i64(t1
);
1145 tcg_temp_free_i64(t2
);
1146 tcg_temp_free_i64(t3
);
1150 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1152 TCGv temp
= tcg_temp_new();
1153 TCGv temp2
= tcg_temp_new();
1155 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1156 } else { /* n is expected to be 1 */
1157 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1158 tcg_gen_shli_tl(temp
, temp
, 1);
1159 /* catch special case r1 = r2 = 0x8000 */
1160 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1161 tcg_gen_sub_tl(temp
, temp
, temp2
);
1163 gen_add_d(ret
, arg1
, temp
);
1165 tcg_temp_free(temp
);
1166 tcg_temp_free(temp2
);
1170 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1172 TCGv temp
= tcg_temp_new();
1173 TCGv temp2
= tcg_temp_new();
1175 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1176 } else { /* n is expected to be 1 */
1177 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1178 tcg_gen_shli_tl(temp
, temp
, 1);
1179 /* catch special case r1 = r2 = 0x8000 */
1180 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1181 tcg_gen_sub_tl(temp
, temp
, temp2
);
1183 gen_adds(ret
, arg1
, temp
);
1185 tcg_temp_free(temp
);
1186 tcg_temp_free(temp2
);
1190 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1191 TCGv arg3
, uint32_t n
)
1193 TCGv temp
= tcg_temp_new();
1194 TCGv temp2
= tcg_temp_new();
1195 TCGv_i64 t1
= tcg_temp_new_i64();
1196 TCGv_i64 t2
= tcg_temp_new_i64();
1197 TCGv_i64 t3
= tcg_temp_new_i64();
1200 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1201 } else { /* n is expected to be 1 */
1202 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1203 tcg_gen_shli_tl(temp
, temp
, 1);
1204 /* catch special case r1 = r2 = 0x8000 */
1205 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1206 tcg_gen_sub_tl(temp
, temp
, temp2
);
1208 tcg_gen_ext_i32_i64(t2
, temp
);
1209 tcg_gen_shli_i64(t2
, t2
, 16);
1210 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1211 gen_add64_d(t3
, t1
, t2
);
1212 /* write back result */
1213 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1215 tcg_temp_free_i64(t1
);
1216 tcg_temp_free_i64(t2
);
1217 tcg_temp_free_i64(t3
);
1218 tcg_temp_free(temp
);
1219 tcg_temp_free(temp2
);
1223 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1224 TCGv arg3
, uint32_t n
)
1226 TCGv temp
= tcg_temp_new();
1227 TCGv temp2
= tcg_temp_new();
1228 TCGv_i64 t1
= tcg_temp_new_i64();
1229 TCGv_i64 t2
= tcg_temp_new_i64();
1232 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1233 } else { /* n is expected to be 1 */
1234 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1235 tcg_gen_shli_tl(temp
, temp
, 1);
1236 /* catch special case r1 = r2 = 0x8000 */
1237 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1238 tcg_gen_sub_tl(temp
, temp
, temp2
);
1240 tcg_gen_ext_i32_i64(t2
, temp
);
1241 tcg_gen_shli_i64(t2
, t2
, 16);
1242 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1244 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
1245 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1247 tcg_temp_free(temp
);
1248 tcg_temp_free(temp2
);
1249 tcg_temp_free_i64(t1
);
1250 tcg_temp_free_i64(t2
);
1254 gen_madd64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1255 TCGv arg3
, uint32_t n
, CPUTriCoreState
*env
)
1257 TCGv_i64 t1
= tcg_temp_new_i64();
1258 TCGv_i64 t2
= tcg_temp_new_i64();
1259 TCGv_i64 t3
= tcg_temp_new_i64();
1260 TCGv_i64 t4
= tcg_temp_new_i64();
1263 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1264 tcg_gen_ext_i32_i64(t2
, arg2
);
1265 tcg_gen_ext_i32_i64(t3
, arg3
);
1267 tcg_gen_mul_i64(t2
, t2
, t3
);
1269 tcg_gen_shli_i64(t2
, t2
, 1);
1271 tcg_gen_add_i64(t4
, t1
, t2
);
1273 tcg_gen_xor_i64(t3
, t4
, t1
);
1274 tcg_gen_xor_i64(t2
, t1
, t2
);
1275 tcg_gen_andc_i64(t3
, t3
, t2
);
1276 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t3
, 32);
1277 /* We produce an overflow on the host if the mul before was
1278 (0x80000000 * 0x80000000) << 1). If this is the
1279 case, we negate the ovf. */
1281 temp
= tcg_temp_new();
1282 temp2
= tcg_temp_new();
1283 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1284 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1285 tcg_gen_and_tl(temp
, temp
, temp2
);
1286 tcg_gen_shli_tl(temp
, temp
, 31);
1287 /* negate v bit, if special condition */
1288 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1290 tcg_temp_free(temp
);
1291 tcg_temp_free(temp2
);
1293 /* write back result */
1294 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1296 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1297 /* Calc AV/SAV bits */
1298 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1299 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1301 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1303 tcg_temp_free_i64(t1
);
1304 tcg_temp_free_i64(t2
);
1305 tcg_temp_free_i64(t3
);
1306 tcg_temp_free_i64(t4
);
1310 gen_madds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1313 TCGv_i64 t1
= tcg_temp_new_i64();
1314 TCGv_i64 t2
= tcg_temp_new_i64();
1315 TCGv_i64 t3
= tcg_temp_new_i64();
1317 tcg_gen_ext_i32_i64(t1
, arg1
);
1318 tcg_gen_ext_i32_i64(t2
, arg2
);
1319 tcg_gen_ext_i32_i64(t3
, arg3
);
1321 tcg_gen_mul_i64(t2
, t2
, t3
);
1322 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1324 gen_helper_madd32_q_add_ssov(ret
, cpu_env
, t1
, t2
);
1326 tcg_temp_free_i64(t1
);
1327 tcg_temp_free_i64(t2
);
1328 tcg_temp_free_i64(t3
);
1332 gen_madds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1333 TCGv arg3
, uint32_t n
)
1335 TCGv_i64 r1
= tcg_temp_new_i64();
1336 TCGv temp
= tcg_const_i32(n
);
1338 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1339 gen_helper_madd64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
1340 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1342 tcg_temp_free_i64(r1
);
1343 tcg_temp_free(temp
);
1345 /* ret = r2 - (r1 * r3); */
1346 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
1348 TCGv_i64 t1
= tcg_temp_new_i64();
1349 TCGv_i64 t2
= tcg_temp_new_i64();
1350 TCGv_i64 t3
= tcg_temp_new_i64();
1352 tcg_gen_ext_i32_i64(t1
, r1
);
1353 tcg_gen_ext_i32_i64(t2
, r2
);
1354 tcg_gen_ext_i32_i64(t3
, r3
);
1356 tcg_gen_mul_i64(t1
, t1
, t3
);
1357 tcg_gen_sub_i64(t1
, t2
, t1
);
1359 tcg_gen_trunc_i64_i32(ret
, t1
);
1362 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
1363 /* result < -0x80000000 */
1364 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
1365 tcg_gen_or_i64(t2
, t2
, t3
);
1366 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
1367 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1370 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1371 /* Calc AV/SAV bits */
1372 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1373 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1375 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1377 tcg_temp_free_i64(t1
);
1378 tcg_temp_free_i64(t2
);
1379 tcg_temp_free_i64(t3
);
1382 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1384 TCGv temp
= tcg_const_i32(con
);
1385 gen_msub32_d(ret
, r1
, r2
, temp
);
1386 tcg_temp_free(temp
);
1390 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1393 TCGv t1
= tcg_temp_new();
1394 TCGv t2
= tcg_temp_new();
1395 TCGv t3
= tcg_temp_new();
1396 TCGv t4
= tcg_temp_new();
1398 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
1399 /* only the sub can overflow */
1400 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
1402 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
1403 tcg_gen_xor_tl(t1
, r2_high
, t2
);
1404 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
1406 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1407 /* Calc AV/SAV bits */
1408 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
1409 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
1411 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1412 /* write back the result */
1413 tcg_gen_mov_tl(ret_low
, t3
);
1414 tcg_gen_mov_tl(ret_high
, t4
);
1423 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1426 TCGv temp
= tcg_const_i32(con
);
1427 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1428 tcg_temp_free(temp
);
1432 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1435 TCGv_i64 t1
= tcg_temp_new_i64();
1436 TCGv_i64 t2
= tcg_temp_new_i64();
1437 TCGv_i64 t3
= tcg_temp_new_i64();
1439 tcg_gen_extu_i32_i64(t1
, r1
);
1440 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
1441 tcg_gen_extu_i32_i64(t3
, r3
);
1443 tcg_gen_mul_i64(t1
, t1
, t3
);
1444 tcg_gen_sub_i64(t3
, t2
, t1
);
1445 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
1446 /* calc V bit, only the sub can overflow, if t1 > t2 */
1447 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
1448 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
1449 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1451 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1452 /* Calc AV/SAV bits */
1453 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1454 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1456 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1458 tcg_temp_free_i64(t1
);
1459 tcg_temp_free_i64(t2
);
1460 tcg_temp_free_i64(t3
);
1464 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1467 TCGv temp
= tcg_const_i32(con
);
1468 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1469 tcg_temp_free(temp
);
1472 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1474 TCGv temp
= tcg_const_i32(r2
);
1475 gen_add_d(ret
, r1
, temp
);
1476 tcg_temp_free(temp
);
1478 /* calculate the carry bit too */
1479 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1481 TCGv t0
= tcg_temp_new_i32();
1482 TCGv result
= tcg_temp_new_i32();
1484 tcg_gen_movi_tl(t0
, 0);
1485 /* Addition and set C/V/SV bits */
1486 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
1488 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1489 tcg_gen_xor_tl(t0
, r1
, r2
);
1490 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1492 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1493 /* Calc AV/SAV bits */
1494 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1495 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1497 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1498 /* write back result */
1499 tcg_gen_mov_tl(ret
, result
);
1501 tcg_temp_free(result
);
1505 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1507 TCGv temp
= tcg_const_i32(con
);
1508 gen_add_CC(ret
, r1
, temp
);
1509 tcg_temp_free(temp
);
1512 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1514 TCGv carry
= tcg_temp_new_i32();
1515 TCGv t0
= tcg_temp_new_i32();
1516 TCGv result
= tcg_temp_new_i32();
1518 tcg_gen_movi_tl(t0
, 0);
1519 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1520 /* Addition, carry and set C/V/SV bits */
1521 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1522 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1524 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1525 tcg_gen_xor_tl(t0
, r1
, r2
);
1526 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1528 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1529 /* Calc AV/SAV bits */
1530 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1531 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1533 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1534 /* write back result */
1535 tcg_gen_mov_tl(ret
, result
);
1537 tcg_temp_free(result
);
1539 tcg_temp_free(carry
);
1542 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1544 TCGv temp
= tcg_const_i32(con
);
1545 gen_addc_CC(ret
, r1
, temp
);
1546 tcg_temp_free(temp
);
1549 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1552 TCGv temp
= tcg_temp_new();
1553 TCGv temp2
= tcg_temp_new();
1554 TCGv result
= tcg_temp_new();
1555 TCGv mask
= tcg_temp_new();
1556 TCGv t0
= tcg_const_i32(0);
1558 /* create mask for sticky bits */
1559 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1560 tcg_gen_shli_tl(mask
, mask
, 31);
1562 tcg_gen_add_tl(result
, r1
, r2
);
1564 tcg_gen_xor_tl(temp
, result
, r1
);
1565 tcg_gen_xor_tl(temp2
, r1
, r2
);
1566 tcg_gen_andc_tl(temp
, temp
, temp2
);
1567 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1569 tcg_gen_and_tl(temp
, temp
, mask
);
1570 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1572 tcg_gen_add_tl(temp
, result
, result
);
1573 tcg_gen_xor_tl(temp
, temp
, result
);
1574 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1576 tcg_gen_and_tl(temp
, temp
, mask
);
1577 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1578 /* write back result */
1579 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1582 tcg_temp_free(temp
);
1583 tcg_temp_free(temp2
);
1584 tcg_temp_free(result
);
1585 tcg_temp_free(mask
);
1588 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1591 TCGv temp
= tcg_const_i32(r2
);
1592 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1593 tcg_temp_free(temp
);
1596 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1598 TCGv temp
= tcg_temp_new_i32();
1599 TCGv result
= tcg_temp_new_i32();
1601 tcg_gen_sub_tl(result
, r1
, r2
);
1603 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1604 tcg_gen_xor_tl(temp
, r1
, r2
);
1605 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1607 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1609 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1610 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1612 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1613 /* write back result */
1614 tcg_gen_mov_tl(ret
, result
);
1616 tcg_temp_free(temp
);
1617 tcg_temp_free(result
);
1621 gen_sub64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
1623 TCGv temp
= tcg_temp_new();
1624 TCGv_i64 t0
= tcg_temp_new_i64();
1625 TCGv_i64 t1
= tcg_temp_new_i64();
1626 TCGv_i64 result
= tcg_temp_new_i64();
1628 tcg_gen_sub_i64(result
, r1
, r2
);
1630 tcg_gen_xor_i64(t1
, result
, r1
);
1631 tcg_gen_xor_i64(t0
, r1
, r2
);
1632 tcg_gen_and_i64(t1
, t1
, t0
);
1633 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t1
, 32);
1635 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1636 /* calc AV/SAV bits */
1637 tcg_gen_trunc_shr_i64_i32(temp
, result
, 32);
1638 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
1639 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
1641 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1642 /* write back result */
1643 tcg_gen_mov_i64(ret
, result
);
1645 tcg_temp_free(temp
);
1646 tcg_temp_free_i64(result
);
1647 tcg_temp_free_i64(t0
);
1648 tcg_temp_free_i64(t1
);
1651 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1653 TCGv result
= tcg_temp_new();
1654 TCGv temp
= tcg_temp_new();
1656 tcg_gen_sub_tl(result
, r1
, r2
);
1658 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1660 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1661 tcg_gen_xor_tl(temp
, r1
, r2
);
1662 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1664 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1666 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1667 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1669 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1670 /* write back result */
1671 tcg_gen_mov_tl(ret
, result
);
1673 tcg_temp_free(result
);
1674 tcg_temp_free(temp
);
1677 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1679 TCGv temp
= tcg_temp_new();
1680 tcg_gen_not_tl(temp
, r2
);
1681 gen_addc_CC(ret
, r1
, temp
);
1682 tcg_temp_free(temp
);
1685 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1688 TCGv temp
= tcg_temp_new();
1689 TCGv temp2
= tcg_temp_new();
1690 TCGv result
= tcg_temp_new();
1691 TCGv mask
= tcg_temp_new();
1692 TCGv t0
= tcg_const_i32(0);
1694 /* create mask for sticky bits */
1695 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1696 tcg_gen_shli_tl(mask
, mask
, 31);
1698 tcg_gen_sub_tl(result
, r1
, r2
);
1700 tcg_gen_xor_tl(temp
, result
, r1
);
1701 tcg_gen_xor_tl(temp2
, r1
, r2
);
1702 tcg_gen_and_tl(temp
, temp
, temp2
);
1703 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1705 tcg_gen_and_tl(temp
, temp
, mask
);
1706 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1708 tcg_gen_add_tl(temp
, result
, result
);
1709 tcg_gen_xor_tl(temp
, temp
, result
);
1710 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1712 tcg_gen_and_tl(temp
, temp
, mask
);
1713 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1714 /* write back result */
1715 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1718 tcg_temp_free(temp
);
1719 tcg_temp_free(temp2
);
1720 tcg_temp_free(result
);
1721 tcg_temp_free(mask
);
1725 gen_msub_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1726 TCGv r3
, uint32_t n
, uint32_t mode
)
1728 TCGv temp
= tcg_const_i32(n
);
1729 TCGv temp2
= tcg_temp_new();
1730 TCGv_i64 temp64
= tcg_temp_new_i64();
1733 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1736 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1739 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1742 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1745 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1746 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1747 tcg_gen_sub_tl
, tcg_gen_sub_tl
);
1748 tcg_temp_free(temp
);
1749 tcg_temp_free(temp2
);
1750 tcg_temp_free_i64(temp64
);
1754 gen_msubs_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1755 TCGv r3
, uint32_t n
, uint32_t mode
)
1757 TCGv temp
= tcg_const_i32(n
);
1758 TCGv temp2
= tcg_temp_new();
1759 TCGv temp3
= tcg_temp_new();
1760 TCGv_i64 temp64
= tcg_temp_new_i64();
1764 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1767 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1770 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1773 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1776 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1777 gen_subs(ret_low
, r1_low
, temp
);
1778 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
1779 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
1780 gen_subs(ret_high
, r1_high
, temp2
);
1781 /* combine v bits */
1782 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1783 /* combine av bits */
1784 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
1786 tcg_temp_free(temp
);
1787 tcg_temp_free(temp2
);
1788 tcg_temp_free(temp3
);
1789 tcg_temp_free_i64(temp64
);
1793 gen_msubm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1794 TCGv r3
, uint32_t n
, uint32_t mode
)
1796 TCGv temp
= tcg_const_i32(n
);
1797 TCGv_i64 temp64
= tcg_temp_new_i64();
1798 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1799 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1802 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1805 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1808 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1811 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1814 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1815 gen_sub64_d(temp64_3
, temp64_2
, temp64
);
1816 /* write back result */
1817 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
1819 tcg_temp_free(temp
);
1820 tcg_temp_free_i64(temp64
);
1821 tcg_temp_free_i64(temp64_2
);
1822 tcg_temp_free_i64(temp64_3
);
1826 gen_msubms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1827 TCGv r3
, uint32_t n
, uint32_t mode
)
1829 TCGv temp
= tcg_const_i32(n
);
1830 TCGv_i64 temp64
= tcg_temp_new_i64();
1831 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1834 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1837 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1840 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1843 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1846 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1847 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
1848 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1850 tcg_temp_free(temp
);
1851 tcg_temp_free_i64(temp64
);
1852 tcg_temp_free_i64(temp64_2
);
1856 gen_msubr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
1859 TCGv temp
= tcg_const_i32(n
);
1860 TCGv_i64 temp64
= tcg_temp_new_i64();
1863 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1866 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1869 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1872 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1875 gen_helper_subr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1877 tcg_temp_free(temp
);
1878 tcg_temp_free_i64(temp64
);
1882 gen_msubr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1884 TCGv temp
= tcg_temp_new();
1885 TCGv temp2
= tcg_temp_new();
1887 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1888 tcg_gen_shli_tl(temp
, r1
, 16);
1889 gen_msubr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1891 tcg_temp_free(temp
);
1892 tcg_temp_free(temp2
);
1896 gen_msubr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1897 uint32_t n
, uint32_t mode
)
1899 TCGv temp
= tcg_const_i32(n
);
1900 TCGv_i64 temp64
= tcg_temp_new_i64();
1903 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1906 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1909 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1912 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1915 gen_helper_subr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1917 tcg_temp_free(temp
);
1918 tcg_temp_free_i64(temp64
);
1922 gen_msubr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1924 TCGv temp
= tcg_temp_new();
1925 TCGv temp2
= tcg_temp_new();
1927 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1928 tcg_gen_shli_tl(temp
, r1
, 16);
1929 gen_msubr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1931 tcg_temp_free(temp
);
1932 tcg_temp_free(temp2
);
1936 gen_msubr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1938 TCGv temp
= tcg_const_i32(n
);
1939 gen_helper_msubr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1940 tcg_temp_free(temp
);
1944 gen_msubrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1946 TCGv temp
= tcg_const_i32(n
);
1947 gen_helper_msubr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1948 tcg_temp_free(temp
);
1952 gen_msub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1953 uint32_t up_shift
, CPUTriCoreState
*env
)
1955 TCGv temp
= tcg_temp_new();
1956 TCGv temp2
= tcg_temp_new();
1957 TCGv temp3
= tcg_temp_new();
1958 TCGv_i64 t1
= tcg_temp_new_i64();
1959 TCGv_i64 t2
= tcg_temp_new_i64();
1960 TCGv_i64 t3
= tcg_temp_new_i64();
1961 TCGv_i64 t4
= tcg_temp_new_i64();
1963 tcg_gen_ext_i32_i64(t2
, arg2
);
1964 tcg_gen_ext_i32_i64(t3
, arg3
);
1966 tcg_gen_mul_i64(t2
, t2
, t3
);
1968 tcg_gen_ext_i32_i64(t1
, arg1
);
1969 /* if we shift part of the fraction out, we need to round up */
1970 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1971 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1972 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1973 tcg_gen_add_i64(t2
, t2
, t4
);
1975 tcg_gen_sub_i64(t3
, t1
, t2
);
1976 tcg_gen_trunc_i64_i32(temp3
, t3
);
1978 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1979 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1980 tcg_gen_or_i64(t1
, t1
, t2
);
1981 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
1982 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1983 /* We produce an overflow on the host if the mul before was
1984 (0x80000000 * 0x80000000) << 1). If this is the
1985 case, we negate the ovf. */
1987 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1988 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1989 tcg_gen_and_tl(temp
, temp
, temp2
);
1990 tcg_gen_shli_tl(temp
, temp
, 31);
1991 /* negate v bit, if special condition */
1992 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1995 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1996 /* Calc AV/SAV bits */
1997 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1998 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
2000 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2001 /* write back result */
2002 tcg_gen_mov_tl(ret
, temp3
);
2004 tcg_temp_free(temp
);
2005 tcg_temp_free(temp2
);
2006 tcg_temp_free(temp3
);
2007 tcg_temp_free_i64(t1
);
2008 tcg_temp_free_i64(t2
);
2009 tcg_temp_free_i64(t3
);
2010 tcg_temp_free_i64(t4
);
2014 gen_m16sub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
2016 TCGv temp
= tcg_temp_new();
2017 TCGv temp2
= tcg_temp_new();
2019 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2020 } else { /* n is expected to be 1 */
2021 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2022 tcg_gen_shli_tl(temp
, temp
, 1);
2023 /* catch special case r1 = r2 = 0x8000 */
2024 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2025 tcg_gen_sub_tl(temp
, temp
, temp2
);
2027 gen_sub_d(ret
, arg1
, temp
);
2029 tcg_temp_free(temp
);
2030 tcg_temp_free(temp2
);
2034 gen_m16subs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
2036 TCGv temp
= tcg_temp_new();
2037 TCGv temp2
= tcg_temp_new();
2039 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2040 } else { /* n is expected to be 1 */
2041 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2042 tcg_gen_shli_tl(temp
, temp
, 1);
2043 /* catch special case r1 = r2 = 0x8000 */
2044 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2045 tcg_gen_sub_tl(temp
, temp
, temp2
);
2047 gen_subs(ret
, arg1
, temp
);
2049 tcg_temp_free(temp
);
2050 tcg_temp_free(temp2
);
2054 gen_m16sub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2055 TCGv arg3
, uint32_t n
)
2057 TCGv temp
= tcg_temp_new();
2058 TCGv temp2
= tcg_temp_new();
2059 TCGv_i64 t1
= tcg_temp_new_i64();
2060 TCGv_i64 t2
= tcg_temp_new_i64();
2061 TCGv_i64 t3
= tcg_temp_new_i64();
2064 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2065 } else { /* n is expected to be 1 */
2066 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2067 tcg_gen_shli_tl(temp
, temp
, 1);
2068 /* catch special case r1 = r2 = 0x8000 */
2069 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2070 tcg_gen_sub_tl(temp
, temp
, temp2
);
2072 tcg_gen_ext_i32_i64(t2
, temp
);
2073 tcg_gen_shli_i64(t2
, t2
, 16);
2074 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2075 gen_sub64_d(t3
, t1
, t2
);
2076 /* write back result */
2077 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
2079 tcg_temp_free_i64(t1
);
2080 tcg_temp_free_i64(t2
);
2081 tcg_temp_free_i64(t3
);
2082 tcg_temp_free(temp
);
2083 tcg_temp_free(temp2
);
2087 gen_m16subs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2088 TCGv arg3
, uint32_t n
)
2090 TCGv temp
= tcg_temp_new();
2091 TCGv temp2
= tcg_temp_new();
2092 TCGv_i64 t1
= tcg_temp_new_i64();
2093 TCGv_i64 t2
= tcg_temp_new_i64();
2096 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2097 } else { /* n is expected to be 1 */
2098 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2099 tcg_gen_shli_tl(temp
, temp
, 1);
2100 /* catch special case r1 = r2 = 0x8000 */
2101 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2102 tcg_gen_sub_tl(temp
, temp
, temp2
);
2104 tcg_gen_ext_i32_i64(t2
, temp
);
2105 tcg_gen_shli_i64(t2
, t2
, 16);
2106 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2108 gen_helper_sub64_ssov(t1
, cpu_env
, t1
, t2
);
2109 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
2111 tcg_temp_free(temp
);
2112 tcg_temp_free(temp2
);
2113 tcg_temp_free_i64(t1
);
2114 tcg_temp_free_i64(t2
);
2118 gen_msub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2119 TCGv arg3
, uint32_t n
, CPUTriCoreState
*env
)
2121 TCGv_i64 t1
= tcg_temp_new_i64();
2122 TCGv_i64 t2
= tcg_temp_new_i64();
2123 TCGv_i64 t3
= tcg_temp_new_i64();
2124 TCGv_i64 t4
= tcg_temp_new_i64();
2127 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2128 tcg_gen_ext_i32_i64(t2
, arg2
);
2129 tcg_gen_ext_i32_i64(t3
, arg3
);
2131 tcg_gen_mul_i64(t2
, t2
, t3
);
2133 tcg_gen_shli_i64(t2
, t2
, 1);
2135 tcg_gen_sub_i64(t4
, t1
, t2
);
2137 tcg_gen_xor_i64(t3
, t4
, t1
);
2138 tcg_gen_xor_i64(t2
, t1
, t2
);
2139 tcg_gen_and_i64(t3
, t3
, t2
);
2140 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t3
, 32);
2141 /* We produce an overflow on the host if the mul before was
2142 (0x80000000 * 0x80000000) << 1). If this is the
2143 case, we negate the ovf. */
2145 temp
= tcg_temp_new();
2146 temp2
= tcg_temp_new();
2147 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
2148 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
2149 tcg_gen_and_tl(temp
, temp
, temp2
);
2150 tcg_gen_shli_tl(temp
, temp
, 31);
2151 /* negate v bit, if special condition */
2152 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2154 tcg_temp_free(temp
);
2155 tcg_temp_free(temp2
);
2157 /* write back result */
2158 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
2160 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2161 /* Calc AV/SAV bits */
2162 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2163 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2165 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2167 tcg_temp_free_i64(t1
);
2168 tcg_temp_free_i64(t2
);
2169 tcg_temp_free_i64(t3
);
2170 tcg_temp_free_i64(t4
);
2174 gen_msubs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
2177 TCGv_i64 t1
= tcg_temp_new_i64();
2178 TCGv_i64 t2
= tcg_temp_new_i64();
2179 TCGv_i64 t3
= tcg_temp_new_i64();
2180 TCGv_i64 t4
= tcg_temp_new_i64();
2182 tcg_gen_ext_i32_i64(t1
, arg1
);
2183 tcg_gen_ext_i32_i64(t2
, arg2
);
2184 tcg_gen_ext_i32_i64(t3
, arg3
);
2186 tcg_gen_mul_i64(t2
, t2
, t3
);
2187 /* if we shift part of the fraction out, we need to round up */
2188 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
2189 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
2190 tcg_gen_sari_i64(t3
, t2
, up_shift
- n
);
2191 tcg_gen_add_i64(t3
, t3
, t4
);
2193 gen_helper_msub32_q_sub_ssov(ret
, cpu_env
, t1
, t3
);
2195 tcg_temp_free_i64(t1
);
2196 tcg_temp_free_i64(t2
);
2197 tcg_temp_free_i64(t3
);
2198 tcg_temp_free_i64(t4
);
2202 gen_msubs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2203 TCGv arg3
, uint32_t n
)
2205 TCGv_i64 r1
= tcg_temp_new_i64();
2206 TCGv temp
= tcg_const_i32(n
);
2208 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
2209 gen_helper_msub64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
2210 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
2212 tcg_temp_free_i64(r1
);
2213 tcg_temp_free(temp
);
2217 gen_msubad_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2218 TCGv r3
, uint32_t n
, uint32_t mode
)
2220 TCGv temp
= tcg_const_i32(n
);
2221 TCGv temp2
= tcg_temp_new();
2222 TCGv_i64 temp64
= tcg_temp_new_i64();
2225 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2228 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2231 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2234 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2237 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2238 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
2239 tcg_gen_add_tl
, tcg_gen_sub_tl
);
2240 tcg_temp_free(temp
);
2241 tcg_temp_free(temp2
);
2242 tcg_temp_free_i64(temp64
);
2246 gen_msubadm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2247 TCGv r3
, uint32_t n
, uint32_t mode
)
2249 TCGv temp
= tcg_const_i32(n
);
2250 TCGv_i64 temp64
= tcg_temp_new_i64();
2251 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2252 TCGv_i64 temp64_3
= tcg_temp_new_i64();
2255 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2258 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2261 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2264 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2267 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
2268 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2269 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2270 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2271 tcg_gen_shli_i64(temp64
, temp64
, 16);
2273 gen_sub64_d(temp64_2
, temp64_3
, temp64
);
2274 /* write back result */
2275 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
2277 tcg_temp_free(temp
);
2278 tcg_temp_free_i64(temp64
);
2279 tcg_temp_free_i64(temp64_2
);
2280 tcg_temp_free_i64(temp64_3
);
2284 gen_msubadr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2286 TCGv temp
= tcg_const_i32(n
);
2287 TCGv temp2
= tcg_temp_new();
2288 TCGv_i64 temp64
= tcg_temp_new_i64();
2291 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2294 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2297 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2300 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2303 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2304 tcg_gen_shli_tl(temp
, r1
, 16);
2305 gen_helper_subadr_h(ret
, cpu_env
, temp64
, temp
, temp2
);
2307 tcg_temp_free(temp
);
2308 tcg_temp_free(temp2
);
2309 tcg_temp_free_i64(temp64
);
2313 gen_msubads_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2314 TCGv r3
, uint32_t n
, uint32_t mode
)
2316 TCGv temp
= tcg_const_i32(n
);
2317 TCGv temp2
= tcg_temp_new();
2318 TCGv temp3
= tcg_temp_new();
2319 TCGv_i64 temp64
= tcg_temp_new_i64();
2323 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2326 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2329 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2332 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2335 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2336 gen_adds(ret_low
, r1_low
, temp
);
2337 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
2338 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
2339 gen_subs(ret_high
, r1_high
, temp2
);
2340 /* combine v bits */
2341 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2342 /* combine av bits */
2343 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
2345 tcg_temp_free(temp
);
2346 tcg_temp_free(temp2
);
2347 tcg_temp_free(temp3
);
2348 tcg_temp_free_i64(temp64
);
2352 gen_msubadms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2353 TCGv r3
, uint32_t n
, uint32_t mode
)
2355 TCGv temp
= tcg_const_i32(n
);
2356 TCGv_i64 temp64
= tcg_temp_new_i64();
2357 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2361 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2364 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2367 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2370 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2373 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2374 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2375 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2376 tcg_gen_shli_i64(temp64
, temp64
, 16);
2377 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
2379 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
2380 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2382 tcg_temp_free(temp
);
2383 tcg_temp_free_i64(temp64
);
2384 tcg_temp_free_i64(temp64_2
);
2388 gen_msubadr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2390 TCGv temp
= tcg_const_i32(n
);
2391 TCGv temp2
= tcg_temp_new();
2392 TCGv_i64 temp64
= tcg_temp_new_i64();
2395 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2398 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2401 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2404 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2407 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2408 tcg_gen_shli_tl(temp
, r1
, 16);
2409 gen_helper_subadr_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
2411 tcg_temp_free(temp
);
2412 tcg_temp_free(temp2
);
2413 tcg_temp_free_i64(temp64
);
2416 static inline void gen_abs(TCGv ret
, TCGv r1
)
2418 TCGv temp
= tcg_temp_new();
2419 TCGv t0
= tcg_const_i32(0);
2421 tcg_gen_neg_tl(temp
, r1
);
2422 tcg_gen_movcond_tl(TCG_COND_GE
, ret
, r1
, t0
, r1
, temp
);
2423 /* overflow can only happen, if r1 = 0x80000000 */
2424 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
2425 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2427 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2429 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2430 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2432 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2434 tcg_temp_free(temp
);
2438 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
2440 TCGv temp
= tcg_temp_new_i32();
2441 TCGv result
= tcg_temp_new_i32();
2443 tcg_gen_sub_tl(result
, r1
, r2
);
2444 tcg_gen_sub_tl(temp
, r2
, r1
);
2445 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
2448 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
2449 tcg_gen_xor_tl(temp
, result
, r2
);
2450 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
2451 tcg_gen_xor_tl(temp
, r1
, r2
);
2452 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2454 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2456 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
2457 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
2459 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2460 /* write back result */
2461 tcg_gen_mov_tl(ret
, result
);
2463 tcg_temp_free(temp
);
2464 tcg_temp_free(result
);
2467 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
2469 TCGv temp
= tcg_const_i32(con
);
2470 gen_absdif(ret
, r1
, temp
);
2471 tcg_temp_free(temp
);
2474 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
2476 TCGv temp
= tcg_const_i32(con
);
2477 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
2478 tcg_temp_free(temp
);
2481 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
2483 TCGv high
= tcg_temp_new();
2484 TCGv low
= tcg_temp_new();
2486 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
2487 tcg_gen_mov_tl(ret
, low
);
2489 tcg_gen_sari_tl(low
, low
, 31);
2490 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
2491 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2493 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2495 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2496 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2498 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2500 tcg_temp_free(high
);
2504 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
2506 TCGv temp
= tcg_const_i32(con
);
2507 gen_mul_i32s(ret
, r1
, temp
);
2508 tcg_temp_free(temp
);
2511 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2513 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
2515 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2517 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2519 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2520 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2522 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2525 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2528 TCGv temp
= tcg_const_i32(con
);
2529 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
2530 tcg_temp_free(temp
);
2533 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2535 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
2537 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2539 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2541 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2542 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2544 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2547 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2550 TCGv temp
= tcg_const_i32(con
);
2551 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
2552 tcg_temp_free(temp
);
2555 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
2557 TCGv temp
= tcg_const_i32(con
);
2558 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
2559 tcg_temp_free(temp
);
2562 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
2564 TCGv temp
= tcg_const_i32(con
);
2565 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
2566 tcg_temp_free(temp
);
2568 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2569 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2571 TCGv temp
= tcg_const_i32(con
);
2572 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2573 tcg_temp_free(temp
);
2576 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2578 TCGv temp
= tcg_const_i32(con
);
2579 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2580 tcg_temp_free(temp
);
2584 gen_mul_q(TCGv rl
, TCGv rh
, TCGv arg1
, TCGv arg2
, uint32_t n
, uint32_t up_shift
)
2586 TCGv temp
= tcg_temp_new();
2587 TCGv_i64 temp_64
= tcg_temp_new_i64();
2588 TCGv_i64 temp2_64
= tcg_temp_new_i64();
2591 if (up_shift
== 32) {
2592 tcg_gen_muls2_tl(rh
, rl
, arg1
, arg2
);
2593 } else if (up_shift
== 16) {
2594 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2595 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2597 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2598 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
);
2599 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2601 tcg_gen_muls2_tl(rl
, rh
, arg1
, arg2
);
2604 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2605 } else { /* n is expected to be 1 */
2606 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2607 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2609 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2611 if (up_shift
== 0) {
2612 tcg_gen_shli_i64(temp_64
, temp_64
, 1);
2614 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
- 1);
2616 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2617 /* overflow only occurs if r1 = r2 = 0x8000 */
2618 if (up_shift
== 0) {/* result is 64 bit */
2619 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rh
,
2621 } else { /* result is 32 bit */
2622 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rl
,
2625 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2626 /* calc sv overflow bit */
2627 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2629 /* calc av overflow bit */
2630 if (up_shift
== 0) {
2631 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2632 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2634 tcg_gen_add_tl(cpu_PSW_AV
, rl
, rl
);
2635 tcg_gen_xor_tl(cpu_PSW_AV
, rl
, cpu_PSW_AV
);
2637 /* calc sav overflow bit */
2638 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2639 tcg_temp_free(temp
);
2640 tcg_temp_free_i64(temp_64
);
2641 tcg_temp_free_i64(temp2_64
);
2645 gen_mul_q_16(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2647 TCGv temp
= tcg_temp_new();
2649 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2650 } else { /* n is expected to be 1 */
2651 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2652 tcg_gen_shli_tl(ret
, ret
, 1);
2653 /* catch special case r1 = r2 = 0x8000 */
2654 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80000000);
2655 tcg_gen_sub_tl(ret
, ret
, temp
);
2658 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2659 /* calc av overflow bit */
2660 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2661 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2662 /* calc sav overflow bit */
2663 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2665 tcg_temp_free(temp
);
2668 static void gen_mulr_q(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2670 TCGv temp
= tcg_temp_new();
2672 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2673 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2675 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2676 tcg_gen_shli_tl(ret
, ret
, 1);
2677 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2678 /* catch special case r1 = r2 = 0x8000 */
2679 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80008000);
2680 tcg_gen_muli_tl(temp
, temp
, 0x8001);
2681 tcg_gen_sub_tl(ret
, ret
, temp
);
2684 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2685 /* calc av overflow bit */
2686 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2687 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2688 /* calc sav overflow bit */
2689 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2690 /* cut halfword off */
2691 tcg_gen_andi_tl(ret
, ret
, 0xffff0000);
2693 tcg_temp_free(temp
);
2697 gen_madds_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2700 TCGv_i64 temp64
= tcg_temp_new_i64();
2701 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2702 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2703 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2704 tcg_temp_free_i64(temp64
);
2708 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2711 TCGv temp
= tcg_const_i32(con
);
2712 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2713 tcg_temp_free(temp
);
2717 gen_maddsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2720 TCGv_i64 temp64
= tcg_temp_new_i64();
2721 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2722 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2723 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2724 tcg_temp_free_i64(temp64
);
2728 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2731 TCGv temp
= tcg_const_i32(con
);
2732 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2733 tcg_temp_free(temp
);
2736 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2738 TCGv temp
= tcg_const_i32(con
);
2739 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2740 tcg_temp_free(temp
);
2743 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2745 TCGv temp
= tcg_const_i32(con
);
2746 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2747 tcg_temp_free(temp
);
2751 gen_msubs_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2754 TCGv_i64 temp64
= tcg_temp_new_i64();
2755 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2756 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2757 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2758 tcg_temp_free_i64(temp64
);
2762 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2765 TCGv temp
= tcg_const_i32(con
);
2766 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2767 tcg_temp_free(temp
);
2771 gen_msubsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2774 TCGv_i64 temp64
= tcg_temp_new_i64();
2775 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2776 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2777 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2778 tcg_temp_free_i64(temp64
);
2782 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2785 TCGv temp
= tcg_const_i32(con
);
2786 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2787 tcg_temp_free(temp
);
2790 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
2792 TCGv sat_neg
= tcg_const_i32(low
);
2793 TCGv temp
= tcg_const_i32(up
);
2795 /* sat_neg = (arg < low ) ? low : arg; */
2796 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
2798 /* ret = (sat_neg > up ) ? up : sat_neg; */
2799 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
2801 tcg_temp_free(sat_neg
);
2802 tcg_temp_free(temp
);
2805 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
2807 TCGv temp
= tcg_const_i32(up
);
2808 /* sat_neg = (arg > up ) ? up : arg; */
2809 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
2810 tcg_temp_free(temp
);
2813 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2815 if (shift_count
== -32) {
2816 tcg_gen_movi_tl(ret
, 0);
2817 } else if (shift_count
>= 0) {
2818 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2820 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
2824 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
2826 TCGv temp_low
, temp_high
;
2828 if (shiftcount
== -16) {
2829 tcg_gen_movi_tl(ret
, 0);
2831 temp_high
= tcg_temp_new();
2832 temp_low
= tcg_temp_new();
2834 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
2835 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
2836 gen_shi(temp_low
, temp_low
, shiftcount
);
2837 gen_shi(ret
, temp_high
, shiftcount
);
2838 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
2840 tcg_temp_free(temp_low
);
2841 tcg_temp_free(temp_high
);
2845 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
2847 uint32_t msk
, msk_start
;
2848 TCGv temp
= tcg_temp_new();
2849 TCGv temp2
= tcg_temp_new();
2850 TCGv t_0
= tcg_const_i32(0);
2852 if (shift_count
== 0) {
2853 /* Clear PSW.C and PSW.V */
2854 tcg_gen_movi_tl(cpu_PSW_C
, 0);
2855 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
2856 tcg_gen_mov_tl(ret
, r1
);
2857 } else if (shift_count
== -32) {
2859 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
2860 /* fill ret completly with sign bit */
2861 tcg_gen_sari_tl(ret
, r1
, 31);
2863 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2864 } else if (shift_count
> 0) {
2865 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
2866 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
2869 msk_start
= 32 - shift_count
;
2870 msk
= ((1 << shift_count
) - 1) << msk_start
;
2871 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2872 /* calc v/sv bits */
2873 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
2874 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
2875 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
2876 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2878 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
2880 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2882 tcg_temp_free(t_max
);
2883 tcg_temp_free(t_min
);
2886 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2888 msk
= (1 << -shift_count
) - 1;
2889 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2891 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2893 /* calc av overflow bit */
2894 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2895 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2896 /* calc sav overflow bit */
2897 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2899 tcg_temp_free(temp
);
2900 tcg_temp_free(temp2
);
2904 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
2906 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
2909 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
2911 TCGv temp
= tcg_const_i32(con
);
2912 gen_shas(ret
, r1
, temp
);
2913 tcg_temp_free(temp
);
2916 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2920 if (shift_count
== 0) {
2921 tcg_gen_mov_tl(ret
, r1
);
2922 } else if (shift_count
> 0) {
2923 low
= tcg_temp_new();
2924 high
= tcg_temp_new();
2926 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
2927 tcg_gen_shli_tl(low
, r1
, shift_count
);
2928 tcg_gen_shli_tl(ret
, high
, shift_count
);
2929 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2932 tcg_temp_free(high
);
2934 low
= tcg_temp_new();
2935 high
= tcg_temp_new();
2937 tcg_gen_ext16s_tl(low
, r1
);
2938 tcg_gen_sari_tl(low
, low
, -shift_count
);
2939 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2940 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2943 tcg_temp_free(high
);
2948 /* ret = {ret[30:0], (r1 cond r2)}; */
2949 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2951 TCGv temp
= tcg_temp_new();
2952 TCGv temp2
= tcg_temp_new();
2954 tcg_gen_shli_tl(temp
, ret
, 1);
2955 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
2956 tcg_gen_or_tl(ret
, temp
, temp2
);
2958 tcg_temp_free(temp
);
2959 tcg_temp_free(temp2
);
2962 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
2964 TCGv temp
= tcg_const_i32(con
);
2965 gen_sh_cond(cond
, ret
, r1
, temp
);
2966 tcg_temp_free(temp
);
2969 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
2971 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
2974 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
2976 TCGv temp
= tcg_const_i32(con
);
2977 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
2978 tcg_temp_free(temp
);
2981 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
2983 TCGv temp
= tcg_const_i32(con
);
2984 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
2985 tcg_temp_free(temp
);
2988 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
2990 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
2993 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
2995 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
2998 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
3000 void(*op1
)(TCGv
, TCGv
, TCGv
),
3001 void(*op2
)(TCGv
, TCGv
, TCGv
))
3005 temp1
= tcg_temp_new();
3006 temp2
= tcg_temp_new();
3008 tcg_gen_shri_tl(temp2
, r2
, pos2
);
3009 tcg_gen_shri_tl(temp1
, r1
, pos1
);
3011 (*op1
)(temp1
, temp1
, temp2
);
3012 (*op2
)(temp1
, ret
, temp1
);
3014 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
3016 tcg_temp_free(temp1
);
3017 tcg_temp_free(temp2
);
3020 /* ret = r1[pos1] op1 r2[pos2]; */
3021 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
3023 void(*op1
)(TCGv
, TCGv
, TCGv
))
3027 temp1
= tcg_temp_new();
3028 temp2
= tcg_temp_new();
3030 tcg_gen_shri_tl(temp2
, r2
, pos2
);
3031 tcg_gen_shri_tl(temp1
, r1
, pos1
);
3033 (*op1
)(ret
, temp1
, temp2
);
3035 tcg_gen_andi_tl(ret
, ret
, 0x1);
3037 tcg_temp_free(temp1
);
3038 tcg_temp_free(temp2
);
3041 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
3042 void(*op
)(TCGv
, TCGv
, TCGv
))
3044 TCGv temp
= tcg_temp_new();
3045 TCGv temp2
= tcg_temp_new();
3046 /* temp = (arg1 cond arg2 )*/
3047 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
3049 tcg_gen_andi_tl(temp2
, ret
, 0x1);
3050 /* temp = temp insn temp2 */
3051 (*op
)(temp
, temp
, temp2
);
3052 /* ret = {ret[31:1], temp} */
3053 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
3055 tcg_temp_free(temp
);
3056 tcg_temp_free(temp2
);
3060 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
3061 void(*op
)(TCGv
, TCGv
, TCGv
))
3063 TCGv temp
= tcg_const_i32(con
);
3064 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
3065 tcg_temp_free(temp
);
3068 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
3069 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
3071 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
3072 tcg_gen_neg_tl(ret
, ret
);
3075 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
3077 TCGv b0
= tcg_temp_new();
3078 TCGv b1
= tcg_temp_new();
3079 TCGv b2
= tcg_temp_new();
3080 TCGv b3
= tcg_temp_new();
3083 tcg_gen_andi_tl(b0
, r1
, 0xff);
3084 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
3087 tcg_gen_andi_tl(b1
, r1
, 0xff00);
3088 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
3091 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
3092 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
3095 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
3096 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
3099 tcg_gen_or_tl(ret
, b0
, b1
);
3100 tcg_gen_or_tl(ret
, ret
, b2
);
3101 tcg_gen_or_tl(ret
, ret
, b3
);
3109 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
3111 TCGv h0
= tcg_temp_new();
3112 TCGv h1
= tcg_temp_new();
3115 tcg_gen_andi_tl(h0
, r1
, 0xffff);
3116 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
3119 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
3120 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
3123 tcg_gen_or_tl(ret
, h0
, h1
);
3128 /* mask = ((1 << width) -1) << pos;
3129 ret = (r1 & ~mask) | (r2 << pos) & mask); */
3130 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
3132 TCGv mask
= tcg_temp_new();
3133 TCGv temp
= tcg_temp_new();
3134 TCGv temp2
= tcg_temp_new();
3136 tcg_gen_movi_tl(mask
, 1);
3137 tcg_gen_shl_tl(mask
, mask
, width
);
3138 tcg_gen_subi_tl(mask
, mask
, 1);
3139 tcg_gen_shl_tl(mask
, mask
, pos
);
3141 tcg_gen_shl_tl(temp
, r2
, pos
);
3142 tcg_gen_and_tl(temp
, temp
, mask
);
3143 tcg_gen_andc_tl(temp2
, r1
, mask
);
3144 tcg_gen_or_tl(ret
, temp
, temp2
);
3146 tcg_temp_free(mask
);
3147 tcg_temp_free(temp
);
3148 tcg_temp_free(temp2
);
3151 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
3153 TCGv_i64 temp
= tcg_temp_new_i64();
3155 gen_helper_bsplit(temp
, r1
);
3156 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
3158 tcg_temp_free_i64(temp
);
3161 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
3163 TCGv_i64 temp
= tcg_temp_new_i64();
3165 gen_helper_unpack(temp
, r1
);
3166 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
3168 tcg_temp_free_i64(temp
);
3172 gen_dvinit_b(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
3174 TCGv_i64 ret
= tcg_temp_new_i64();
3176 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
3177 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
3179 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
3181 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
3183 tcg_temp_free_i64(ret
);
3187 gen_dvinit_h(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
3189 TCGv_i64 ret
= tcg_temp_new_i64();
3191 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
3192 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
3194 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
3196 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
3198 tcg_temp_free_i64(ret
);
3201 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
3203 TCGv temp
= tcg_temp_new();
3205 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
3206 tcg_gen_xor_tl(temp
, temp
, arg_low
);
3207 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
3208 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
3209 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
3211 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3212 tcg_gen_movi_tl(cpu_PSW_V
, 0);
3213 tcg_temp_free(temp
);
3216 static void gen_calc_usb_mulr_h(TCGv arg
)
3218 TCGv temp
= tcg_temp_new();
3220 tcg_gen_add_tl(temp
, arg
, arg
);
3221 tcg_gen_xor_tl(temp
, temp
, arg
);
3222 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
3223 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
3225 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3227 tcg_gen_movi_tl(cpu_PSW_V
, 0);
3228 tcg_temp_free(temp
);
3231 /* helpers for generating program flow micro-ops */
3233 static inline void gen_save_pc(target_ulong pc
)
3235 tcg_gen_movi_tl(cpu_PC
, pc
);
3238 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
3240 TranslationBlock
*tb
;
3242 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
3243 likely(!ctx
->singlestep_enabled
)) {
3246 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
3249 if (ctx
->singlestep_enabled
) {
3250 /* raise exception debug */
3256 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
3257 TCGv r2
, int16_t address
)
3259 TCGLabel
*jumpLabel
= gen_new_label();
3260 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
3262 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
3264 gen_set_label(jumpLabel
);
3265 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
3268 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
3269 int r2
, int16_t address
)
3271 TCGv temp
= tcg_const_i32(r2
);
3272 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
3273 tcg_temp_free(temp
);
3276 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
3278 TCGLabel
*l1
= gen_new_label();
3280 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
3281 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
3282 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
3284 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
3287 static void gen_fcall_save_ctx(DisasContext
*ctx
)
3289 TCGv temp
= tcg_temp_new();
3291 tcg_gen_addi_tl(temp
, cpu_gpr_a
[10], -4);
3292 tcg_gen_qemu_st_tl(cpu_gpr_a
[11], temp
, ctx
->mem_idx
, MO_LESL
);
3293 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3294 tcg_gen_mov_tl(cpu_gpr_a
[10], temp
);
3296 tcg_temp_free(temp
);
3299 static void gen_fret(DisasContext
*ctx
)
3301 TCGv temp
= tcg_temp_new();
3303 tcg_gen_andi_tl(temp
, cpu_gpr_a
[11], ~0x1);
3304 tcg_gen_qemu_ld_tl(cpu_gpr_a
[11], cpu_gpr_a
[10], ctx
->mem_idx
, MO_LESL
);
3305 tcg_gen_addi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], 4);
3306 tcg_gen_mov_tl(cpu_PC
, temp
);
3308 ctx
->bstate
= BS_BRANCH
;
3310 tcg_temp_free(temp
);
3313 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
3314 int r2
, int32_t constant
, int32_t offset
)
3320 /* SB-format jumps */
3323 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3325 case OPC1_32_B_CALL
:
3326 case OPC1_16_SB_CALL
:
3327 gen_helper_1arg(call
, ctx
->next_pc
);
3328 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3331 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
3333 case OPC1_16_SB_JNZ
:
3334 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
3336 /* SBC-format jumps */
3337 case OPC1_16_SBC_JEQ
:
3338 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
3340 case OPC1_16_SBC_JNE
:
3341 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
3343 /* SBRN-format jumps */
3344 case OPC1_16_SBRN_JZ_T
:
3345 temp
= tcg_temp_new();
3346 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
3347 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3348 tcg_temp_free(temp
);
3350 case OPC1_16_SBRN_JNZ_T
:
3351 temp
= tcg_temp_new();
3352 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
3353 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3354 tcg_temp_free(temp
);
3356 /* SBR-format jumps */
3357 case OPC1_16_SBR_JEQ
:
3358 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
3361 case OPC1_16_SBR_JNE
:
3362 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
3365 case OPC1_16_SBR_JNZ
:
3366 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
3368 case OPC1_16_SBR_JNZ_A
:
3369 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3371 case OPC1_16_SBR_JGEZ
:
3372 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
3374 case OPC1_16_SBR_JGTZ
:
3375 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
3377 case OPC1_16_SBR_JLEZ
:
3378 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
3380 case OPC1_16_SBR_JLTZ
:
3381 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
3383 case OPC1_16_SBR_JZ
:
3384 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
3386 case OPC1_16_SBR_JZ_A
:
3387 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3389 case OPC1_16_SBR_LOOP
:
3390 gen_loop(ctx
, r1
, offset
* 2 - 32);
3392 /* SR-format jumps */
3394 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
3397 case OPC2_32_SYS_RET
:
3398 case OPC2_16_SR_RET
:
3399 gen_helper_ret(cpu_env
);
3403 case OPC1_32_B_CALLA
:
3404 gen_helper_1arg(call
, ctx
->next_pc
);
3405 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3407 case OPC1_32_B_FCALL
:
3408 gen_fcall_save_ctx(ctx
);
3409 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3411 case OPC1_32_B_FCALLA
:
3412 gen_fcall_save_ctx(ctx
);
3413 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3416 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3419 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3422 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3423 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3426 case OPCM_32_BRC_EQ_NEQ
:
3427 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3428 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3430 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3433 case OPCM_32_BRC_GE
:
3434 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3435 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3437 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3438 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3442 case OPCM_32_BRC_JLT
:
3443 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3444 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3446 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3447 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3451 case OPCM_32_BRC_JNE
:
3452 temp
= tcg_temp_new();
3453 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3454 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3455 /* subi is unconditional */
3456 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3457 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3459 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3460 /* addi is unconditional */
3461 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3462 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3464 tcg_temp_free(temp
);
3467 case OPCM_32_BRN_JTT
:
3468 n
= MASK_OP_BRN_N(ctx
->opcode
);
3470 temp
= tcg_temp_new();
3471 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3473 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3474 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3476 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3478 tcg_temp_free(temp
);
3481 case OPCM_32_BRR_EQ_NEQ
:
3482 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3483 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3486 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3490 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3491 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3492 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3495 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3499 case OPCM_32_BRR_GE
:
3500 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3501 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3504 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3508 case OPCM_32_BRR_JLT
:
3509 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3510 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3513 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3517 case OPCM_32_BRR_LOOP
:
3518 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3519 gen_loop(ctx
, r2
, offset
* 2);
3521 /* OPC2_32_BRR_LOOPU */
3522 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3525 case OPCM_32_BRR_JNE
:
3526 temp
= tcg_temp_new();
3527 temp2
= tcg_temp_new();
3528 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3529 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3530 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3531 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3532 /* subi is unconditional */
3533 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3534 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3536 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3537 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3538 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3539 /* addi is unconditional */
3540 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3541 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3543 tcg_temp_free(temp
);
3544 tcg_temp_free(temp2
);
3546 case OPCM_32_BRR_JNZ
:
3547 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3548 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3550 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3554 printf("Branch Error at %x\n", ctx
->pc
);
3556 ctx
->bstate
= BS_BRANCH
;
3561 * Functions for decoding instructions
3564 static void decode_src_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int op1
)
3570 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3571 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3574 case OPC1_16_SRC_ADD
:
3575 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3577 case OPC1_16_SRC_ADD_A15
:
3578 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3580 case OPC1_16_SRC_ADD_15A
:
3581 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3583 case OPC1_16_SRC_ADD_A
:
3584 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3586 case OPC1_16_SRC_CADD
:
3587 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3590 case OPC1_16_SRC_CADDN
:
3591 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3594 case OPC1_16_SRC_CMOV
:
3595 temp
= tcg_const_tl(0);
3596 temp2
= tcg_const_tl(const4
);
3597 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3598 temp2
, cpu_gpr_d
[r1
]);
3599 tcg_temp_free(temp
);
3600 tcg_temp_free(temp2
);
3602 case OPC1_16_SRC_CMOVN
:
3603 temp
= tcg_const_tl(0);
3604 temp2
= tcg_const_tl(const4
);
3605 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3606 temp2
, cpu_gpr_d
[r1
]);
3607 tcg_temp_free(temp
);
3608 tcg_temp_free(temp2
);
3610 case OPC1_16_SRC_EQ
:
3611 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3614 case OPC1_16_SRC_LT
:
3615 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3618 case OPC1_16_SRC_MOV
:
3619 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3621 case OPC1_16_SRC_MOV_A
:
3622 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3623 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3625 case OPC1_16_SRC_MOV_E
:
3626 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3627 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3628 tcg_gen_sari_tl(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], 31);
3629 } /* TODO: else raise illegal opcode trap */
3631 case OPC1_16_SRC_SH
:
3632 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3634 case OPC1_16_SRC_SHA
:
3635 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3640 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3645 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3646 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3649 case OPC1_16_SRR_ADD
:
3650 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3652 case OPC1_16_SRR_ADD_A15
:
3653 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3655 case OPC1_16_SRR_ADD_15A
:
3656 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3658 case OPC1_16_SRR_ADD_A
:
3659 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3661 case OPC1_16_SRR_ADDS
:
3662 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3664 case OPC1_16_SRR_AND
:
3665 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3667 case OPC1_16_SRR_CMOV
:
3668 temp
= tcg_const_tl(0);
3669 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3670 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3671 tcg_temp_free(temp
);
3673 case OPC1_16_SRR_CMOVN
:
3674 temp
= tcg_const_tl(0);
3675 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3676 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3677 tcg_temp_free(temp
);
3679 case OPC1_16_SRR_EQ
:
3680 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3683 case OPC1_16_SRR_LT
:
3684 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3687 case OPC1_16_SRR_MOV
:
3688 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3690 case OPC1_16_SRR_MOV_A
:
3691 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3693 case OPC1_16_SRR_MOV_AA
:
3694 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3696 case OPC1_16_SRR_MOV_D
:
3697 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3699 case OPC1_16_SRR_MUL
:
3700 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3702 case OPC1_16_SRR_OR
:
3703 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3705 case OPC1_16_SRR_SUB
:
3706 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3708 case OPC1_16_SRR_SUB_A15B
:
3709 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3711 case OPC1_16_SRR_SUB_15AB
:
3712 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3714 case OPC1_16_SRR_SUBS
:
3715 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3717 case OPC1_16_SRR_XOR
:
3718 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3723 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3727 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3728 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3731 case OPC1_16_SSR_ST_A
:
3732 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3734 case OPC1_16_SSR_ST_A_POSTINC
:
3735 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3736 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3738 case OPC1_16_SSR_ST_B
:
3739 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3741 case OPC1_16_SSR_ST_B_POSTINC
:
3742 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3743 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3745 case OPC1_16_SSR_ST_H
:
3746 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3748 case OPC1_16_SSR_ST_H_POSTINC
:
3749 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3750 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3752 case OPC1_16_SSR_ST_W
:
3753 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3755 case OPC1_16_SSR_ST_W_POSTINC
:
3756 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3757 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3762 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3766 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3769 case OPC1_16_SC_AND
:
3770 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3772 case OPC1_16_SC_BISR
:
3773 gen_helper_1arg(bisr
, const16
& 0xff);
3775 case OPC1_16_SC_LD_A
:
3776 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3778 case OPC1_16_SC_LD_W
:
3779 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3781 case OPC1_16_SC_MOV
:
3782 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3785 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3787 case OPC1_16_SC_ST_A
:
3788 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3790 case OPC1_16_SC_ST_W
:
3791 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3793 case OPC1_16_SC_SUB_A
:
3794 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3799 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3803 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3804 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3808 case OPC1_16_SLR_LD_A
:
3809 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3811 case OPC1_16_SLR_LD_A_POSTINC
:
3812 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3813 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3815 case OPC1_16_SLR_LD_BU
:
3816 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3818 case OPC1_16_SLR_LD_BU_POSTINC
:
3819 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3820 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3822 case OPC1_16_SLR_LD_H
:
3823 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3825 case OPC1_16_SLR_LD_H_POSTINC
:
3826 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3827 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3829 case OPC1_16_SLR_LD_W
:
3830 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3832 case OPC1_16_SLR_LD_W_POSTINC
:
3833 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3834 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3839 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3844 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3845 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3849 case OPC1_16_SRO_LD_A
:
3850 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3852 case OPC1_16_SRO_LD_BU
:
3853 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3855 case OPC1_16_SRO_LD_H
:
3856 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
3858 case OPC1_16_SRO_LD_W
:
3859 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3861 case OPC1_16_SRO_ST_A
:
3862 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3864 case OPC1_16_SRO_ST_B
:
3865 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3867 case OPC1_16_SRO_ST_H
:
3868 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3870 case OPC1_16_SRO_ST_W
:
3871 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3876 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
3879 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3882 case OPC2_16_SR_NOP
:
3884 case OPC2_16_SR_RET
:
3885 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3887 case OPC2_16_SR_RFE
:
3888 gen_helper_rfe(cpu_env
);
3890 ctx
->bstate
= BS_BRANCH
;
3892 case OPC2_16_SR_DEBUG
:
3893 /* raise EXCP_DEBUG */
3895 case OPC2_16_SR_FRET
:
3900 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
3906 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3907 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3910 case OPC2_16_SR_RSUB
:
3911 /* overflow only if r1 = -0x80000000 */
3912 temp
= tcg_const_i32(-0x80000000);
3914 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
3915 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3917 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3919 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3921 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3922 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3924 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3925 tcg_temp_free(temp
);
3927 case OPC2_16_SR_SAT_B
:
3928 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3930 case OPC2_16_SR_SAT_BU
:
3931 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3933 case OPC2_16_SR_SAT_H
:
3934 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3936 case OPC2_16_SR_SAT_HU
:
3937 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3942 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
3950 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3952 /* handle ADDSC.A opcode only being 6 bit long */
3953 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3954 op1
= OPC1_16_SRRS_ADDSC_A
;
3958 case OPC1_16_SRC_ADD
:
3959 case OPC1_16_SRC_ADD_A15
:
3960 case OPC1_16_SRC_ADD_15A
:
3961 case OPC1_16_SRC_ADD_A
:
3962 case OPC1_16_SRC_CADD
:
3963 case OPC1_16_SRC_CADDN
:
3964 case OPC1_16_SRC_CMOV
:
3965 case OPC1_16_SRC_CMOVN
:
3966 case OPC1_16_SRC_EQ
:
3967 case OPC1_16_SRC_LT
:
3968 case OPC1_16_SRC_MOV
:
3969 case OPC1_16_SRC_MOV_A
:
3970 case OPC1_16_SRC_MOV_E
:
3971 case OPC1_16_SRC_SH
:
3972 case OPC1_16_SRC_SHA
:
3973 decode_src_opc(env
, ctx
, op1
);
3976 case OPC1_16_SRR_ADD
:
3977 case OPC1_16_SRR_ADD_A15
:
3978 case OPC1_16_SRR_ADD_15A
:
3979 case OPC1_16_SRR_ADD_A
:
3980 case OPC1_16_SRR_ADDS
:
3981 case OPC1_16_SRR_AND
:
3982 case OPC1_16_SRR_CMOV
:
3983 case OPC1_16_SRR_CMOVN
:
3984 case OPC1_16_SRR_EQ
:
3985 case OPC1_16_SRR_LT
:
3986 case OPC1_16_SRR_MOV
:
3987 case OPC1_16_SRR_MOV_A
:
3988 case OPC1_16_SRR_MOV_AA
:
3989 case OPC1_16_SRR_MOV_D
:
3990 case OPC1_16_SRR_MUL
:
3991 case OPC1_16_SRR_OR
:
3992 case OPC1_16_SRR_SUB
:
3993 case OPC1_16_SRR_SUB_A15B
:
3994 case OPC1_16_SRR_SUB_15AB
:
3995 case OPC1_16_SRR_SUBS
:
3996 case OPC1_16_SRR_XOR
:
3997 decode_srr_opc(ctx
, op1
);
4000 case OPC1_16_SSR_ST_A
:
4001 case OPC1_16_SSR_ST_A_POSTINC
:
4002 case OPC1_16_SSR_ST_B
:
4003 case OPC1_16_SSR_ST_B_POSTINC
:
4004 case OPC1_16_SSR_ST_H
:
4005 case OPC1_16_SSR_ST_H_POSTINC
:
4006 case OPC1_16_SSR_ST_W
:
4007 case OPC1_16_SSR_ST_W_POSTINC
:
4008 decode_ssr_opc(ctx
, op1
);
4011 case OPC1_16_SRRS_ADDSC_A
:
4012 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
4013 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
4014 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
4015 temp
= tcg_temp_new();
4016 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
4017 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
4018 tcg_temp_free(temp
);
4021 case OPC1_16_SLRO_LD_A
:
4022 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4023 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4024 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4026 case OPC1_16_SLRO_LD_BU
:
4027 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4028 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4029 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
4031 case OPC1_16_SLRO_LD_H
:
4032 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4033 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4034 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
4036 case OPC1_16_SLRO_LD_W
:
4037 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4038 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4039 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4042 case OPC1_16_SB_CALL
:
4044 case OPC1_16_SB_JNZ
:
4046 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
4047 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
4050 case OPC1_16_SBC_JEQ
:
4051 case OPC1_16_SBC_JNE
:
4052 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
4053 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
4054 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
4057 case OPC1_16_SBRN_JNZ_T
:
4058 case OPC1_16_SBRN_JZ_T
:
4059 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
4060 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
4061 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
4064 case OPC1_16_SBR_JEQ
:
4065 case OPC1_16_SBR_JGEZ
:
4066 case OPC1_16_SBR_JGTZ
:
4067 case OPC1_16_SBR_JLEZ
:
4068 case OPC1_16_SBR_JLTZ
:
4069 case OPC1_16_SBR_JNE
:
4070 case OPC1_16_SBR_JNZ
:
4071 case OPC1_16_SBR_JNZ_A
:
4072 case OPC1_16_SBR_JZ
:
4073 case OPC1_16_SBR_JZ_A
:
4074 case OPC1_16_SBR_LOOP
:
4075 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
4076 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
4077 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
4080 case OPC1_16_SC_AND
:
4081 case OPC1_16_SC_BISR
:
4082 case OPC1_16_SC_LD_A
:
4083 case OPC1_16_SC_LD_W
:
4084 case OPC1_16_SC_MOV
:
4086 case OPC1_16_SC_ST_A
:
4087 case OPC1_16_SC_ST_W
:
4088 case OPC1_16_SC_SUB_A
:
4089 decode_sc_opc(ctx
, op1
);
4092 case OPC1_16_SLR_LD_A
:
4093 case OPC1_16_SLR_LD_A_POSTINC
:
4094 case OPC1_16_SLR_LD_BU
:
4095 case OPC1_16_SLR_LD_BU_POSTINC
:
4096 case OPC1_16_SLR_LD_H
:
4097 case OPC1_16_SLR_LD_H_POSTINC
:
4098 case OPC1_16_SLR_LD_W
:
4099 case OPC1_16_SLR_LD_W_POSTINC
:
4100 decode_slr_opc(ctx
, op1
);
4103 case OPC1_16_SRO_LD_A
:
4104 case OPC1_16_SRO_LD_BU
:
4105 case OPC1_16_SRO_LD_H
:
4106 case OPC1_16_SRO_LD_W
:
4107 case OPC1_16_SRO_ST_A
:
4108 case OPC1_16_SRO_ST_B
:
4109 case OPC1_16_SRO_ST_H
:
4110 case OPC1_16_SRO_ST_W
:
4111 decode_sro_opc(ctx
, op1
);
4114 case OPC1_16_SSRO_ST_A
:
4115 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4116 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4117 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4119 case OPC1_16_SSRO_ST_B
:
4120 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4121 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4122 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
4124 case OPC1_16_SSRO_ST_H
:
4125 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4126 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4127 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
4129 case OPC1_16_SSRO_ST_W
:
4130 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4131 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4132 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4135 case OPCM_16_SR_SYSTEM
:
4136 decode_sr_system(env
, ctx
);
4138 case OPCM_16_SR_ACCU
:
4139 decode_sr_accu(env
, ctx
);
4142 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
4143 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
4145 case OPC1_16_SR_NOT
:
4146 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
4147 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
4153 * 32 bit instructions
4157 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
4164 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4165 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4166 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4168 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4171 case OPC2_32_ABS_LD_A
:
4172 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4174 case OPC2_32_ABS_LD_D
:
4175 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4177 case OPC2_32_ABS_LD_DA
:
4178 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4180 case OPC2_32_ABS_LD_W
:
4181 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4185 tcg_temp_free(temp
);
4188 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
4195 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4196 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4197 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4199 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4202 case OPC2_32_ABS_LD_B
:
4203 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
4205 case OPC2_32_ABS_LD_BU
:
4206 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
4208 case OPC2_32_ABS_LD_H
:
4209 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
4211 case OPC2_32_ABS_LD_HU
:
4212 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
4216 tcg_temp_free(temp
);
4219 static void decode_abs_ldst_swap(CPUTriCoreState
*env
, DisasContext
*ctx
)
4226 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4227 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4228 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4230 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4233 case OPC2_32_ABS_LDMST
:
4234 gen_ldmst(ctx
, r1
, temp
);
4236 case OPC2_32_ABS_SWAP_W
:
4237 gen_swap(ctx
, r1
, temp
);
4241 tcg_temp_free(temp
);
4244 static void decode_abs_ldst_context(CPUTriCoreState
*env
, DisasContext
*ctx
)
4249 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4250 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4253 case OPC2_32_ABS_LDLCX
:
4254 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
4256 case OPC2_32_ABS_LDUCX
:
4257 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
4259 case OPC2_32_ABS_STLCX
:
4260 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
4262 case OPC2_32_ABS_STUCX
:
4263 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
4268 static void decode_abs_store(CPUTriCoreState
*env
, DisasContext
*ctx
)
4275 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4276 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4277 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4279 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4282 case OPC2_32_ABS_ST_A
:
4283 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4285 case OPC2_32_ABS_ST_D
:
4286 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4288 case OPC2_32_ABS_ST_DA
:
4289 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4291 case OPC2_32_ABS_ST_W
:
4292 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4296 tcg_temp_free(temp
);
4299 static void decode_abs_storeb_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
4306 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4307 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4308 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4310 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4313 case OPC2_32_ABS_ST_B
:
4314 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
4316 case OPC2_32_ABS_ST_H
:
4317 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
4320 tcg_temp_free(temp
);
4325 static void decode_bit_andacc(CPUTriCoreState
*env
, DisasContext
*ctx
)
4331 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4332 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4333 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4334 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4335 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4336 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4340 case OPC2_32_BIT_AND_AND_T
:
4341 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4342 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
4344 case OPC2_32_BIT_AND_ANDN_T
:
4345 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4346 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
4348 case OPC2_32_BIT_AND_NOR_T
:
4349 if (TCG_TARGET_HAS_andc_i32
) {
4350 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4351 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
4353 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4354 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
4357 case OPC2_32_BIT_AND_OR_T
:
4358 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4359 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
4364 static void decode_bit_logical_t(CPUTriCoreState
*env
, DisasContext
*ctx
)
4369 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4370 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4371 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4372 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4373 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4374 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4377 case OPC2_32_BIT_AND_T
:
4378 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4379 pos1
, pos2
, &tcg_gen_and_tl
);
4381 case OPC2_32_BIT_ANDN_T
:
4382 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4383 pos1
, pos2
, &tcg_gen_andc_tl
);
4385 case OPC2_32_BIT_NOR_T
:
4386 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4387 pos1
, pos2
, &tcg_gen_nor_tl
);
4389 case OPC2_32_BIT_OR_T
:
4390 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4391 pos1
, pos2
, &tcg_gen_or_tl
);
4396 static void decode_bit_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
4402 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4403 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4404 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4405 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4406 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4407 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4409 temp
= tcg_temp_new();
4411 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
4412 if (op2
== OPC2_32_BIT_INSN_T
) {
4413 tcg_gen_not_tl(temp
, temp
);
4415 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
4416 tcg_temp_free(temp
);
4419 static void decode_bit_logical_t2(CPUTriCoreState
*env
, DisasContext
*ctx
)
4426 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4427 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4428 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4429 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4430 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4431 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4434 case OPC2_32_BIT_NAND_T
:
4435 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4436 pos1
, pos2
, &tcg_gen_nand_tl
);
4438 case OPC2_32_BIT_ORN_T
:
4439 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4440 pos1
, pos2
, &tcg_gen_orc_tl
);
4442 case OPC2_32_BIT_XNOR_T
:
4443 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4444 pos1
, pos2
, &tcg_gen_eqv_tl
);
4446 case OPC2_32_BIT_XOR_T
:
4447 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4448 pos1
, pos2
, &tcg_gen_xor_tl
);
4453 static void decode_bit_orand(CPUTriCoreState
*env
, DisasContext
*ctx
)
4460 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4461 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4462 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4463 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4464 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4465 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4468 case OPC2_32_BIT_OR_AND_T
:
4469 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4470 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
4472 case OPC2_32_BIT_OR_ANDN_T
:
4473 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4474 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
4476 case OPC2_32_BIT_OR_NOR_T
:
4477 if (TCG_TARGET_HAS_orc_i32
) {
4478 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4479 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
4481 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4482 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
4485 case OPC2_32_BIT_OR_OR_T
:
4486 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4487 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
4492 static void decode_bit_sh_logic1(CPUTriCoreState
*env
, DisasContext
*ctx
)
4499 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4500 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4501 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4502 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4503 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4504 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4506 temp
= tcg_temp_new();
4509 case OPC2_32_BIT_SH_AND_T
:
4510 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4511 pos1
, pos2
, &tcg_gen_and_tl
);
4513 case OPC2_32_BIT_SH_ANDN_T
:
4514 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4515 pos1
, pos2
, &tcg_gen_andc_tl
);
4517 case OPC2_32_BIT_SH_NOR_T
:
4518 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4519 pos1
, pos2
, &tcg_gen_nor_tl
);
4521 case OPC2_32_BIT_SH_OR_T
:
4522 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4523 pos1
, pos2
, &tcg_gen_or_tl
);
4526 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4527 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4528 tcg_temp_free(temp
);
4531 static void decode_bit_sh_logic2(CPUTriCoreState
*env
, DisasContext
*ctx
)
4538 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4539 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4540 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4541 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4542 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4543 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4545 temp
= tcg_temp_new();
4548 case OPC2_32_BIT_SH_NAND_T
:
4549 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
4550 pos1
, pos2
, &tcg_gen_nand_tl
);
4552 case OPC2_32_BIT_SH_ORN_T
:
4553 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4554 pos1
, pos2
, &tcg_gen_orc_tl
);
4556 case OPC2_32_BIT_SH_XNOR_T
:
4557 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4558 pos1
, pos2
, &tcg_gen_eqv_tl
);
4560 case OPC2_32_BIT_SH_XOR_T
:
4561 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4562 pos1
, pos2
, &tcg_gen_xor_tl
);
4565 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4566 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4567 tcg_temp_free(temp
);
4573 static void decode_bo_addrmode_post_pre_base(CPUTriCoreState
*env
,
4581 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4582 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4583 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4584 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4587 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
4588 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
4589 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
4590 /* instruction to access the cache */
4592 case OPC2_32_BO_CACHEA_WI_POSTINC
:
4593 case OPC2_32_BO_CACHEA_W_POSTINC
:
4594 case OPC2_32_BO_CACHEA_I_POSTINC
:
4595 /* instruction to access the cache, but we still need to handle
4596 the addressing mode */
4597 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4599 case OPC2_32_BO_CACHEA_WI_PREINC
:
4600 case OPC2_32_BO_CACHEA_W_PREINC
:
4601 case OPC2_32_BO_CACHEA_I_PREINC
:
4602 /* instruction to access the cache, but we still need to handle
4603 the addressing mode */
4604 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4606 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
4607 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
4608 /* TODO: Raise illegal opcode trap,
4609 if !tricore_feature(TRICORE_FEATURE_131) */
4611 case OPC2_32_BO_CACHEI_W_POSTINC
:
4612 case OPC2_32_BO_CACHEI_WI_POSTINC
:
4613 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
4614 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4615 } /* TODO: else raise illegal opcode trap */
4617 case OPC2_32_BO_CACHEI_W_PREINC
:
4618 case OPC2_32_BO_CACHEI_WI_PREINC
:
4619 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
4620 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4621 } /* TODO: else raise illegal opcode trap */
4623 case OPC2_32_BO_ST_A_SHORTOFF
:
4624 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4626 case OPC2_32_BO_ST_A_POSTINC
:
4627 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4629 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4631 case OPC2_32_BO_ST_A_PREINC
:
4632 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4634 case OPC2_32_BO_ST_B_SHORTOFF
:
4635 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4637 case OPC2_32_BO_ST_B_POSTINC
:
4638 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4640 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4642 case OPC2_32_BO_ST_B_PREINC
:
4643 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4645 case OPC2_32_BO_ST_D_SHORTOFF
:
4646 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4649 case OPC2_32_BO_ST_D_POSTINC
:
4650 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4651 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4653 case OPC2_32_BO_ST_D_PREINC
:
4654 temp
= tcg_temp_new();
4655 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4656 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4657 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4658 tcg_temp_free(temp
);
4660 case OPC2_32_BO_ST_DA_SHORTOFF
:
4661 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4664 case OPC2_32_BO_ST_DA_POSTINC
:
4665 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4666 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4668 case OPC2_32_BO_ST_DA_PREINC
:
4669 temp
= tcg_temp_new();
4670 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4671 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4672 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4673 tcg_temp_free(temp
);
4675 case OPC2_32_BO_ST_H_SHORTOFF
:
4676 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4678 case OPC2_32_BO_ST_H_POSTINC
:
4679 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4681 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4683 case OPC2_32_BO_ST_H_PREINC
:
4684 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4686 case OPC2_32_BO_ST_Q_SHORTOFF
:
4687 temp
= tcg_temp_new();
4688 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4689 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4690 tcg_temp_free(temp
);
4692 case OPC2_32_BO_ST_Q_POSTINC
:
4693 temp
= tcg_temp_new();
4694 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4695 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
4697 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4698 tcg_temp_free(temp
);
4700 case OPC2_32_BO_ST_Q_PREINC
:
4701 temp
= tcg_temp_new();
4702 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4703 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4704 tcg_temp_free(temp
);
4706 case OPC2_32_BO_ST_W_SHORTOFF
:
4707 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4709 case OPC2_32_BO_ST_W_POSTINC
:
4710 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4712 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4714 case OPC2_32_BO_ST_W_PREINC
:
4715 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4720 static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState
*env
,
4726 TCGv temp
, temp2
, temp3
;
4728 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4729 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4730 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4731 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4733 temp
= tcg_temp_new();
4734 temp2
= tcg_temp_new();
4735 temp3
= tcg_const_i32(off10
);
4737 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4738 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4741 case OPC2_32_BO_CACHEA_WI_BR
:
4742 case OPC2_32_BO_CACHEA_W_BR
:
4743 case OPC2_32_BO_CACHEA_I_BR
:
4744 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4746 case OPC2_32_BO_CACHEA_WI_CIRC
:
4747 case OPC2_32_BO_CACHEA_W_CIRC
:
4748 case OPC2_32_BO_CACHEA_I_CIRC
:
4749 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4751 case OPC2_32_BO_ST_A_BR
:
4752 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4753 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4755 case OPC2_32_BO_ST_A_CIRC
:
4756 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4757 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4759 case OPC2_32_BO_ST_B_BR
:
4760 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4761 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4763 case OPC2_32_BO_ST_B_CIRC
:
4764 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4765 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4767 case OPC2_32_BO_ST_D_BR
:
4768 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4769 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4771 case OPC2_32_BO_ST_D_CIRC
:
4772 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4773 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4774 tcg_gen_addi_tl(temp
, temp
, 4);
4775 tcg_gen_rem_tl(temp
, temp
, temp2
);
4776 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4777 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4778 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4780 case OPC2_32_BO_ST_DA_BR
:
4781 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4782 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4784 case OPC2_32_BO_ST_DA_CIRC
:
4785 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4786 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4787 tcg_gen_addi_tl(temp
, temp
, 4);
4788 tcg_gen_rem_tl(temp
, temp
, temp2
);
4789 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4790 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4791 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4793 case OPC2_32_BO_ST_H_BR
:
4794 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4795 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4797 case OPC2_32_BO_ST_H_CIRC
:
4798 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4799 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4801 case OPC2_32_BO_ST_Q_BR
:
4802 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4803 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4804 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4806 case OPC2_32_BO_ST_Q_CIRC
:
4807 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4808 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4809 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4811 case OPC2_32_BO_ST_W_BR
:
4812 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4813 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4815 case OPC2_32_BO_ST_W_CIRC
:
4816 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4817 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4820 tcg_temp_free(temp
);
4821 tcg_temp_free(temp2
);
4822 tcg_temp_free(temp3
);
4825 static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState
*env
,
4833 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4834 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4835 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4836 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4839 case OPC2_32_BO_LD_A_SHORTOFF
:
4840 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4842 case OPC2_32_BO_LD_A_POSTINC
:
4843 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4845 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4847 case OPC2_32_BO_LD_A_PREINC
:
4848 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4850 case OPC2_32_BO_LD_B_SHORTOFF
:
4851 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4853 case OPC2_32_BO_LD_B_POSTINC
:
4854 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4856 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4858 case OPC2_32_BO_LD_B_PREINC
:
4859 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4861 case OPC2_32_BO_LD_BU_SHORTOFF
:
4862 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4864 case OPC2_32_BO_LD_BU_POSTINC
:
4865 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4867 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4869 case OPC2_32_BO_LD_BU_PREINC
:
4870 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4872 case OPC2_32_BO_LD_D_SHORTOFF
:
4873 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4876 case OPC2_32_BO_LD_D_POSTINC
:
4877 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4878 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4880 case OPC2_32_BO_LD_D_PREINC
:
4881 temp
= tcg_temp_new();
4882 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4883 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4884 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4885 tcg_temp_free(temp
);
4887 case OPC2_32_BO_LD_DA_SHORTOFF
:
4888 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4891 case OPC2_32_BO_LD_DA_POSTINC
:
4892 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4893 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4895 case OPC2_32_BO_LD_DA_PREINC
:
4896 temp
= tcg_temp_new();
4897 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4898 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4899 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4900 tcg_temp_free(temp
);
4902 case OPC2_32_BO_LD_H_SHORTOFF
:
4903 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4905 case OPC2_32_BO_LD_H_POSTINC
:
4906 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4908 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4910 case OPC2_32_BO_LD_H_PREINC
:
4911 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4913 case OPC2_32_BO_LD_HU_SHORTOFF
:
4914 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4916 case OPC2_32_BO_LD_HU_POSTINC
:
4917 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4919 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4921 case OPC2_32_BO_LD_HU_PREINC
:
4922 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4924 case OPC2_32_BO_LD_Q_SHORTOFF
:
4925 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4926 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4928 case OPC2_32_BO_LD_Q_POSTINC
:
4929 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4931 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4932 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4934 case OPC2_32_BO_LD_Q_PREINC
:
4935 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4936 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4938 case OPC2_32_BO_LD_W_SHORTOFF
:
4939 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4941 case OPC2_32_BO_LD_W_POSTINC
:
4942 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4944 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4946 case OPC2_32_BO_LD_W_PREINC
:
4947 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4952 static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState
*env
,
4959 TCGv temp
, temp2
, temp3
;
4961 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4962 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4963 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4964 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4966 temp
= tcg_temp_new();
4967 temp2
= tcg_temp_new();
4968 temp3
= tcg_const_i32(off10
);
4970 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4971 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4975 case OPC2_32_BO_LD_A_BR
:
4976 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4977 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4979 case OPC2_32_BO_LD_A_CIRC
:
4980 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4981 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4983 case OPC2_32_BO_LD_B_BR
:
4984 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4985 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4987 case OPC2_32_BO_LD_B_CIRC
:
4988 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4989 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4991 case OPC2_32_BO_LD_BU_BR
:
4992 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4993 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4995 case OPC2_32_BO_LD_BU_CIRC
:
4996 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4997 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4999 case OPC2_32_BO_LD_D_BR
:
5000 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
5001 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5003 case OPC2_32_BO_LD_D_CIRC
:
5004 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
5005 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
5006 tcg_gen_addi_tl(temp
, temp
, 4);
5007 tcg_gen_rem_tl(temp
, temp
, temp2
);
5008 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
5009 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
5010 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5012 case OPC2_32_BO_LD_DA_BR
:
5013 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
5014 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5016 case OPC2_32_BO_LD_DA_CIRC
:
5017 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
5018 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
5019 tcg_gen_addi_tl(temp
, temp
, 4);
5020 tcg_gen_rem_tl(temp
, temp
, temp2
);
5021 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
5022 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
5023 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5025 case OPC2_32_BO_LD_H_BR
:
5026 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
5027 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5029 case OPC2_32_BO_LD_H_CIRC
:
5030 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
5031 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5033 case OPC2_32_BO_LD_HU_BR
:
5034 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5035 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5037 case OPC2_32_BO_LD_HU_CIRC
:
5038 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5039 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5041 case OPC2_32_BO_LD_Q_BR
:
5042 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5043 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
5044 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5046 case OPC2_32_BO_LD_Q_CIRC
:
5047 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5048 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
5049 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5051 case OPC2_32_BO_LD_W_BR
:
5052 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
5053 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5055 case OPC2_32_BO_LD_W_CIRC
:
5056 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
5057 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5060 tcg_temp_free(temp
);
5061 tcg_temp_free(temp2
);
5062 tcg_temp_free(temp3
);
5065 static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState
*env
,
5074 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
5075 r2
= MASK_OP_BO_S2(ctx
->opcode
);
5076 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
5077 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
5080 temp
= tcg_temp_new();
5081 temp2
= tcg_temp_new();
5084 case OPC2_32_BO_LDLCX_SHORTOFF
:
5085 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5086 gen_helper_ldlcx(cpu_env
, temp
);
5088 case OPC2_32_BO_LDMST_SHORTOFF
:
5089 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5090 gen_ldmst(ctx
, r1
, temp
);
5092 case OPC2_32_BO_LDMST_POSTINC
:
5093 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
5094 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5096 case OPC2_32_BO_LDMST_PREINC
:
5097 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5098 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
5100 case OPC2_32_BO_LDUCX_SHORTOFF
:
5101 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5102 gen_helper_lducx(cpu_env
, temp
);
5104 case OPC2_32_BO_LEA_SHORTOFF
:
5105 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
5107 case OPC2_32_BO_STLCX_SHORTOFF
:
5108 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5109 gen_helper_stlcx(cpu_env
, temp
);
5111 case OPC2_32_BO_STUCX_SHORTOFF
:
5112 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5113 gen_helper_stucx(cpu_env
, temp
);
5115 case OPC2_32_BO_SWAP_W_SHORTOFF
:
5116 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5117 gen_swap(ctx
, r1
, temp
);
5119 case OPC2_32_BO_SWAP_W_POSTINC
:
5120 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
5121 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5123 case OPC2_32_BO_SWAP_W_PREINC
:
5124 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5125 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
5127 case OPC2_32_BO_CMPSWAP_W_SHORTOFF
:
5128 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5129 gen_cmpswap(ctx
, r1
, temp
);
5131 case OPC2_32_BO_CMPSWAP_W_POSTINC
:
5132 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
5133 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5135 case OPC2_32_BO_CMPSWAP_W_PREINC
:
5136 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5137 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
5139 case OPC2_32_BO_SWAPMSK_W_SHORTOFF
:
5140 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5141 gen_swapmsk(ctx
, r1
, temp
);
5143 case OPC2_32_BO_SWAPMSK_W_POSTINC
:
5144 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
5145 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5147 case OPC2_32_BO_SWAPMSK_W_PREINC
:
5148 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5149 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
5152 tcg_temp_free(temp
);
5153 tcg_temp_free(temp2
);
5156 static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState
*env
,
5163 TCGv temp
, temp2
, temp3
;
5165 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
5166 r2
= MASK_OP_BO_S2(ctx
->opcode
);
5167 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
5168 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
5170 temp
= tcg_temp_new();
5171 temp2
= tcg_temp_new();
5172 temp3
= tcg_const_i32(off10
);
5174 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
5175 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
5178 case OPC2_32_BO_LDMST_BR
:
5179 gen_ldmst(ctx
, r1
, temp2
);
5180 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5182 case OPC2_32_BO_LDMST_CIRC
:
5183 gen_ldmst(ctx
, r1
, temp2
);
5184 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5186 case OPC2_32_BO_SWAP_W_BR
:
5187 gen_swap(ctx
, r1
, temp2
);
5188 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5190 case OPC2_32_BO_SWAP_W_CIRC
:
5191 gen_swap(ctx
, r1
, temp2
);
5192 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5194 case OPC2_32_BO_CMPSWAP_W_BR
:
5195 gen_cmpswap(ctx
, r1
, temp2
);
5196 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5198 case OPC2_32_BO_CMPSWAP_W_CIRC
:
5199 gen_cmpswap(ctx
, r1
, temp2
);
5200 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5202 case OPC2_32_BO_SWAPMSK_W_BR
:
5203 gen_swapmsk(ctx
, r1
, temp2
);
5204 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5206 case OPC2_32_BO_SWAPMSK_W_CIRC
:
5207 gen_swapmsk(ctx
, r1
, temp2
);
5208 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5212 tcg_temp_free(temp
);
5213 tcg_temp_free(temp2
);
5214 tcg_temp_free(temp3
);
5217 static void decode_bol_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int32_t op1
)
5223 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
5224 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
5225 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
5228 case OPC1_32_BOL_LD_A_LONGOFF
:
5229 temp
= tcg_temp_new();
5230 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
5231 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
5232 tcg_temp_free(temp
);
5234 case OPC1_32_BOL_LD_W_LONGOFF
:
5235 temp
= tcg_temp_new();
5236 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
5237 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
5238 tcg_temp_free(temp
);
5240 case OPC1_32_BOL_LEA_LONGOFF
:
5241 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
5243 case OPC1_32_BOL_ST_A_LONGOFF
:
5244 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5245 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
5247 /* raise illegal opcode trap */
5250 case OPC1_32_BOL_ST_W_LONGOFF
:
5251 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
5253 case OPC1_32_BOL_LD_B_LONGOFF
:
5254 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5255 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
5257 /* raise illegal opcode trap */
5260 case OPC1_32_BOL_LD_BU_LONGOFF
:
5261 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5262 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
5264 /* raise illegal opcode trap */
5267 case OPC1_32_BOL_LD_H_LONGOFF
:
5268 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5269 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
5271 /* raise illegal opcode trap */
5274 case OPC1_32_BOL_LD_HU_LONGOFF
:
5275 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5276 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
5278 /* raise illegal opcode trap */
5281 case OPC1_32_BOL_ST_B_LONGOFF
:
5282 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5283 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
5285 /* raise illegal opcode trap */
5288 case OPC1_32_BOL_ST_H_LONGOFF
:
5289 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5290 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
5292 /* raise illegal opcode trap */
5299 static void decode_rc_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
5306 r2
= MASK_OP_RC_D(ctx
->opcode
);
5307 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5308 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5309 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5311 temp
= tcg_temp_new();
5314 case OPC2_32_RC_AND
:
5315 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5317 case OPC2_32_RC_ANDN
:
5318 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
5320 case OPC2_32_RC_NAND
:
5321 tcg_gen_movi_tl(temp
, const9
);
5322 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
5324 case OPC2_32_RC_NOR
:
5325 tcg_gen_movi_tl(temp
, const9
);
5326 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
5329 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5331 case OPC2_32_RC_ORN
:
5332 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
5335 const9
= sextract32(const9
, 0, 6);
5336 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5338 case OPC2_32_RC_SH_H
:
5339 const9
= sextract32(const9
, 0, 5);
5340 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5342 case OPC2_32_RC_SHA
:
5343 const9
= sextract32(const9
, 0, 6);
5344 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5346 case OPC2_32_RC_SHA_H
:
5347 const9
= sextract32(const9
, 0, 5);
5348 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5350 case OPC2_32_RC_SHAS
:
5351 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5353 case OPC2_32_RC_XNOR
:
5354 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5355 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
5357 case OPC2_32_RC_XOR
:
5358 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5361 tcg_temp_free(temp
);
5364 static void decode_rc_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
5372 r2
= MASK_OP_RC_D(ctx
->opcode
);
5373 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5374 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5376 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5378 temp
= tcg_temp_new();
5381 case OPC2_32_RC_ABSDIF
:
5382 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5384 case OPC2_32_RC_ABSDIFS
:
5385 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5387 case OPC2_32_RC_ADD
:
5388 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5390 case OPC2_32_RC_ADDC
:
5391 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5393 case OPC2_32_RC_ADDS
:
5394 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5396 case OPC2_32_RC_ADDS_U
:
5397 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5399 case OPC2_32_RC_ADDX
:
5400 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5402 case OPC2_32_RC_AND_EQ
:
5403 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5404 const9
, &tcg_gen_and_tl
);
5406 case OPC2_32_RC_AND_GE
:
5407 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5408 const9
, &tcg_gen_and_tl
);
5410 case OPC2_32_RC_AND_GE_U
:
5411 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5412 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5413 const9
, &tcg_gen_and_tl
);
5415 case OPC2_32_RC_AND_LT
:
5416 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5417 const9
, &tcg_gen_and_tl
);
5419 case OPC2_32_RC_AND_LT_U
:
5420 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5421 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5422 const9
, &tcg_gen_and_tl
);
5424 case OPC2_32_RC_AND_NE
:
5425 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5426 const9
, &tcg_gen_and_tl
);
5429 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5431 case OPC2_32_RC_EQANY_B
:
5432 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5434 case OPC2_32_RC_EQANY_H
:
5435 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5438 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5440 case OPC2_32_RC_GE_U
:
5441 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5442 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5445 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5447 case OPC2_32_RC_LT_U
:
5448 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5449 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5451 case OPC2_32_RC_MAX
:
5452 tcg_gen_movi_tl(temp
, const9
);
5453 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5454 cpu_gpr_d
[r1
], temp
);
5456 case OPC2_32_RC_MAX_U
:
5457 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5458 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5459 cpu_gpr_d
[r1
], temp
);
5461 case OPC2_32_RC_MIN
:
5462 tcg_gen_movi_tl(temp
, const9
);
5463 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5464 cpu_gpr_d
[r1
], temp
);
5466 case OPC2_32_RC_MIN_U
:
5467 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5468 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5469 cpu_gpr_d
[r1
], temp
);
5472 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5474 case OPC2_32_RC_OR_EQ
:
5475 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5476 const9
, &tcg_gen_or_tl
);
5478 case OPC2_32_RC_OR_GE
:
5479 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5480 const9
, &tcg_gen_or_tl
);
5482 case OPC2_32_RC_OR_GE_U
:
5483 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5484 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5485 const9
, &tcg_gen_or_tl
);
5487 case OPC2_32_RC_OR_LT
:
5488 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5489 const9
, &tcg_gen_or_tl
);
5491 case OPC2_32_RC_OR_LT_U
:
5492 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5493 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5494 const9
, &tcg_gen_or_tl
);
5496 case OPC2_32_RC_OR_NE
:
5497 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5498 const9
, &tcg_gen_or_tl
);
5500 case OPC2_32_RC_RSUB
:
5501 tcg_gen_movi_tl(temp
, const9
);
5502 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5504 case OPC2_32_RC_RSUBS
:
5505 tcg_gen_movi_tl(temp
, const9
);
5506 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5508 case OPC2_32_RC_RSUBS_U
:
5509 tcg_gen_movi_tl(temp
, const9
);
5510 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5512 case OPC2_32_RC_SH_EQ
:
5513 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5515 case OPC2_32_RC_SH_GE
:
5516 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5518 case OPC2_32_RC_SH_GE_U
:
5519 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5520 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5522 case OPC2_32_RC_SH_LT
:
5523 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5525 case OPC2_32_RC_SH_LT_U
:
5526 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5527 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5529 case OPC2_32_RC_SH_NE
:
5530 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5532 case OPC2_32_RC_XOR_EQ
:
5533 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5534 const9
, &tcg_gen_xor_tl
);
5536 case OPC2_32_RC_XOR_GE
:
5537 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5538 const9
, &tcg_gen_xor_tl
);
5540 case OPC2_32_RC_XOR_GE_U
:
5541 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5542 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5543 const9
, &tcg_gen_xor_tl
);
5545 case OPC2_32_RC_XOR_LT
:
5546 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5547 const9
, &tcg_gen_xor_tl
);
5549 case OPC2_32_RC_XOR_LT_U
:
5550 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5551 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5552 const9
, &tcg_gen_xor_tl
);
5554 case OPC2_32_RC_XOR_NE
:
5555 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5556 const9
, &tcg_gen_xor_tl
);
5559 tcg_temp_free(temp
);
5562 static void decode_rc_serviceroutine(CPUTriCoreState
*env
, DisasContext
*ctx
)
5567 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5568 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5571 case OPC2_32_RC_BISR
:
5572 gen_helper_1arg(bisr
, const9
);
5574 case OPC2_32_RC_SYSCALL
:
5575 /* TODO: Add exception generation */
5580 static void decode_rc_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
5586 r2
= MASK_OP_RC_D(ctx
->opcode
);
5587 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5588 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5590 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5593 case OPC2_32_RC_MUL_32
:
5594 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5596 case OPC2_32_RC_MUL_64
:
5597 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5599 case OPC2_32_RC_MULS_32
:
5600 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5602 case OPC2_32_RC_MUL_U_64
:
5603 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5604 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5606 case OPC2_32_RC_MULS_U_32
:
5607 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5608 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5614 static void decode_rcpw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
5618 int32_t pos
, width
, const4
;
5622 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
5623 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
5624 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
5625 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
5626 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
5627 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
5630 case OPC2_32_RCPW_IMASK
:
5631 /* if pos + width > 31 undefined result */
5632 if (pos
+ width
<= 31) {
5633 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
5634 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
5637 case OPC2_32_RCPW_INSERT
:
5638 /* if pos + width > 32 undefined result */
5639 if (pos
+ width
<= 32) {
5640 temp
= tcg_const_i32(const4
);
5641 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
5642 tcg_temp_free(temp
);
5650 static void decode_rcrw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
5654 int32_t width
, const4
;
5656 TCGv temp
, temp2
, temp3
;
5658 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
5659 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
5660 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
5661 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
5662 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
5663 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
5665 temp
= tcg_temp_new();
5666 temp2
= tcg_temp_new();
5669 case OPC2_32_RCRW_IMASK
:
5670 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r4
], 0x1f);
5671 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
5672 tcg_gen_shl_tl(cpu_gpr_d
[r3
+ 1], temp2
, temp
);
5673 tcg_gen_movi_tl(temp2
, const4
);
5674 tcg_gen_shl_tl(cpu_gpr_d
[r3
], temp2
, temp
);
5676 case OPC2_32_RCRW_INSERT
:
5677 temp3
= tcg_temp_new();
5679 tcg_gen_movi_tl(temp
, width
);
5680 tcg_gen_movi_tl(temp2
, const4
);
5681 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r4
], 0x1f);
5682 gen_insert(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
5684 tcg_temp_free(temp3
);
5687 tcg_temp_free(temp
);
5688 tcg_temp_free(temp2
);
5693 static void decode_rcr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
5701 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5702 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5703 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5704 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5705 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5708 case OPC2_32_RCR_CADD
:
5709 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
5712 case OPC2_32_RCR_CADDN
:
5713 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
5716 case OPC2_32_RCR_SEL
:
5717 temp
= tcg_const_i32(0);
5718 temp2
= tcg_const_i32(const9
);
5719 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5720 cpu_gpr_d
[r1
], temp2
);
5721 tcg_temp_free(temp
);
5722 tcg_temp_free(temp2
);
5724 case OPC2_32_RCR_SELN
:
5725 temp
= tcg_const_i32(0);
5726 temp2
= tcg_const_i32(const9
);
5727 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5728 cpu_gpr_d
[r1
], temp2
);
5729 tcg_temp_free(temp
);
5730 tcg_temp_free(temp2
);
5735 static void decode_rcr_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
5742 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5743 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5744 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5745 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5746 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5749 case OPC2_32_RCR_MADD_32
:
5750 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5752 case OPC2_32_RCR_MADD_64
:
5753 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5754 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5756 case OPC2_32_RCR_MADDS_32
:
5757 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5759 case OPC2_32_RCR_MADDS_64
:
5760 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5761 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5763 case OPC2_32_RCR_MADD_U_64
:
5764 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5765 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5766 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5768 case OPC2_32_RCR_MADDS_U_32
:
5769 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5770 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5772 case OPC2_32_RCR_MADDS_U_64
:
5773 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5774 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5775 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5780 static void decode_rcr_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
5787 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5788 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5789 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5790 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5791 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5794 case OPC2_32_RCR_MSUB_32
:
5795 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5797 case OPC2_32_RCR_MSUB_64
:
5798 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5799 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5801 case OPC2_32_RCR_MSUBS_32
:
5802 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5804 case OPC2_32_RCR_MSUBS_64
:
5805 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5806 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5808 case OPC2_32_RCR_MSUB_U_64
:
5809 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5810 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5811 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5813 case OPC2_32_RCR_MSUBS_U_32
:
5814 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5815 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5817 case OPC2_32_RCR_MSUBS_U_64
:
5818 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5819 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5820 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5827 static void decode_rlc_opc(CPUTriCoreState
*env
, DisasContext
*ctx
,
5833 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
5834 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
5835 r2
= MASK_OP_RLC_D(ctx
->opcode
);
5838 case OPC1_32_RLC_ADDI
:
5839 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
5841 case OPC1_32_RLC_ADDIH
:
5842 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
5844 case OPC1_32_RLC_ADDIH_A
:
5845 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
5847 case OPC1_32_RLC_MFCR
:
5848 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5849 gen_mfcr(env
, cpu_gpr_d
[r2
], const16
);
5851 case OPC1_32_RLC_MOV
:
5852 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5854 case OPC1_32_RLC_MOV_64
:
5855 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5856 if ((r2
& 0x1) != 0) {
5857 /* TODO: raise OPD trap */
5859 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5860 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
5862 /* TODO: raise illegal opcode trap */
5865 case OPC1_32_RLC_MOV_U
:
5866 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5867 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5869 case OPC1_32_RLC_MOV_H
:
5870 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
5872 case OPC1_32_RLC_MOVH_A
:
5873 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
5875 case OPC1_32_RLC_MTCR
:
5876 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5877 gen_mtcr(env
, ctx
, cpu_gpr_d
[r1
], const16
);
5883 static void decode_rr_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
5888 r3
= MASK_OP_RR_D(ctx
->opcode
);
5889 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5890 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5891 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5894 case OPC2_32_RR_ABS
:
5895 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5897 case OPC2_32_RR_ABS_B
:
5898 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5900 case OPC2_32_RR_ABS_H
:
5901 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5903 case OPC2_32_RR_ABSDIF
:
5904 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5906 case OPC2_32_RR_ABSDIF_B
:
5907 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5910 case OPC2_32_RR_ABSDIF_H
:
5911 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5914 case OPC2_32_RR_ABSDIFS
:
5915 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5918 case OPC2_32_RR_ABSDIFS_H
:
5919 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5922 case OPC2_32_RR_ABSS
:
5923 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5925 case OPC2_32_RR_ABSS_H
:
5926 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5928 case OPC2_32_RR_ADD
:
5929 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5931 case OPC2_32_RR_ADD_B
:
5932 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5934 case OPC2_32_RR_ADD_H
:
5935 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5937 case OPC2_32_RR_ADDC
:
5938 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5940 case OPC2_32_RR_ADDS
:
5941 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5943 case OPC2_32_RR_ADDS_H
:
5944 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5947 case OPC2_32_RR_ADDS_HU
:
5948 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5951 case OPC2_32_RR_ADDS_U
:
5952 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5955 case OPC2_32_RR_ADDX
:
5956 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5958 case OPC2_32_RR_AND_EQ
:
5959 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5960 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5962 case OPC2_32_RR_AND_GE
:
5963 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5964 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5966 case OPC2_32_RR_AND_GE_U
:
5967 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5968 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5970 case OPC2_32_RR_AND_LT
:
5971 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5972 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5974 case OPC2_32_RR_AND_LT_U
:
5975 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5976 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5978 case OPC2_32_RR_AND_NE
:
5979 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5980 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5983 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5986 case OPC2_32_RR_EQ_B
:
5987 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5989 case OPC2_32_RR_EQ_H
:
5990 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5992 case OPC2_32_RR_EQ_W
:
5993 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5995 case OPC2_32_RR_EQANY_B
:
5996 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5998 case OPC2_32_RR_EQANY_H
:
5999 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6002 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6005 case OPC2_32_RR_GE_U
:
6006 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6010 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6013 case OPC2_32_RR_LT_U
:
6014 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6017 case OPC2_32_RR_LT_B
:
6018 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6020 case OPC2_32_RR_LT_BU
:
6021 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6023 case OPC2_32_RR_LT_H
:
6024 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6026 case OPC2_32_RR_LT_HU
:
6027 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6029 case OPC2_32_RR_LT_W
:
6030 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6032 case OPC2_32_RR_LT_WU
:
6033 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6035 case OPC2_32_RR_MAX
:
6036 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6037 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6039 case OPC2_32_RR_MAX_U
:
6040 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6041 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6043 case OPC2_32_RR_MAX_B
:
6044 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6046 case OPC2_32_RR_MAX_BU
:
6047 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6049 case OPC2_32_RR_MAX_H
:
6050 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6052 case OPC2_32_RR_MAX_HU
:
6053 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6055 case OPC2_32_RR_MIN
:
6056 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6057 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6059 case OPC2_32_RR_MIN_U
:
6060 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6061 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6063 case OPC2_32_RR_MIN_B
:
6064 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6066 case OPC2_32_RR_MIN_BU
:
6067 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6069 case OPC2_32_RR_MIN_H
:
6070 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6072 case OPC2_32_RR_MIN_HU
:
6073 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6075 case OPC2_32_RR_MOV
:
6076 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6079 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6082 case OPC2_32_RR_OR_EQ
:
6083 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6084 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6086 case OPC2_32_RR_OR_GE
:
6087 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6088 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6090 case OPC2_32_RR_OR_GE_U
:
6091 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6092 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6094 case OPC2_32_RR_OR_LT
:
6095 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6096 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6098 case OPC2_32_RR_OR_LT_U
:
6099 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6100 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6102 case OPC2_32_RR_OR_NE
:
6103 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6104 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6106 case OPC2_32_RR_SAT_B
:
6107 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
6109 case OPC2_32_RR_SAT_BU
:
6110 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
6112 case OPC2_32_RR_SAT_H
:
6113 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
6115 case OPC2_32_RR_SAT_HU
:
6116 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
6118 case OPC2_32_RR_SH_EQ
:
6119 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6122 case OPC2_32_RR_SH_GE
:
6123 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6126 case OPC2_32_RR_SH_GE_U
:
6127 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6130 case OPC2_32_RR_SH_LT
:
6131 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6134 case OPC2_32_RR_SH_LT_U
:
6135 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6138 case OPC2_32_RR_SH_NE
:
6139 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6142 case OPC2_32_RR_SUB
:
6143 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6145 case OPC2_32_RR_SUB_B
:
6146 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6148 case OPC2_32_RR_SUB_H
:
6149 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6151 case OPC2_32_RR_SUBC
:
6152 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6154 case OPC2_32_RR_SUBS
:
6155 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6157 case OPC2_32_RR_SUBS_U
:
6158 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6160 case OPC2_32_RR_SUBS_H
:
6161 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6164 case OPC2_32_RR_SUBS_HU
:
6165 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6168 case OPC2_32_RR_SUBX
:
6169 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6171 case OPC2_32_RR_XOR_EQ
:
6172 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6173 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6175 case OPC2_32_RR_XOR_GE
:
6176 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6177 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6179 case OPC2_32_RR_XOR_GE_U
:
6180 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6181 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6183 case OPC2_32_RR_XOR_LT
:
6184 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6185 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6187 case OPC2_32_RR_XOR_LT_U
:
6188 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6189 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6191 case OPC2_32_RR_XOR_NE
:
6192 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6193 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6198 static void decode_rr_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
6204 r3
= MASK_OP_RR_D(ctx
->opcode
);
6205 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6206 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6208 temp
= tcg_temp_new();
6209 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6212 case OPC2_32_RR_AND
:
6213 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6215 case OPC2_32_RR_ANDN
:
6216 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6218 case OPC2_32_RR_CLO
:
6219 gen_helper_clo(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6221 case OPC2_32_RR_CLO_H
:
6222 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6224 case OPC2_32_RR_CLS
:
6225 gen_helper_cls(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6227 case OPC2_32_RR_CLS_H
:
6228 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6230 case OPC2_32_RR_CLZ
:
6231 gen_helper_clz(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6233 case OPC2_32_RR_CLZ_H
:
6234 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6236 case OPC2_32_RR_NAND
:
6237 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6239 case OPC2_32_RR_NOR
:
6240 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6243 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6245 case OPC2_32_RR_ORN
:
6246 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6249 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6251 case OPC2_32_RR_SH_H
:
6252 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6254 case OPC2_32_RR_SHA
:
6255 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6257 case OPC2_32_RR_SHA_H
:
6258 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6260 case OPC2_32_RR_SHAS
:
6261 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6263 case OPC2_32_RR_XNOR
:
6264 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6266 case OPC2_32_RR_XOR
:
6267 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6270 tcg_temp_free(temp
);
6273 static void decode_rr_address(CPUTriCoreState
*env
, DisasContext
*ctx
)
6279 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6280 r3
= MASK_OP_RR_D(ctx
->opcode
);
6281 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6282 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6283 n
= MASK_OP_RR_N(ctx
->opcode
);
6286 case OPC2_32_RR_ADD_A
:
6287 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6289 case OPC2_32_RR_ADDSC_A
:
6290 temp
= tcg_temp_new();
6291 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
6292 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
6293 tcg_temp_free(temp
);
6295 case OPC2_32_RR_ADDSC_AT
:
6296 temp
= tcg_temp_new();
6297 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
6298 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
6299 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
6300 tcg_temp_free(temp
);
6302 case OPC2_32_RR_EQ_A
:
6303 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6306 case OPC2_32_RR_EQZ
:
6307 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6309 case OPC2_32_RR_GE_A
:
6310 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6313 case OPC2_32_RR_LT_A
:
6314 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6317 case OPC2_32_RR_MOV_A
:
6318 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
6320 case OPC2_32_RR_MOV_AA
:
6321 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
6323 case OPC2_32_RR_MOV_D
:
6324 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
6326 case OPC2_32_RR_NE_A
:
6327 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6330 case OPC2_32_RR_NEZ_A
:
6331 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6333 case OPC2_32_RR_SUB_A
:
6334 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6339 static void decode_rr_idirect(CPUTriCoreState
*env
, DisasContext
*ctx
)
6344 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6345 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6349 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6351 case OPC2_32_RR_JLI
:
6352 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
6353 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6355 case OPC2_32_RR_CALLI
:
6356 gen_helper_1arg(call
, ctx
->next_pc
);
6357 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6359 case OPC2_32_RR_FCALLI
:
6360 gen_fcall_save_ctx(ctx
);
6361 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6365 ctx
->bstate
= BS_BRANCH
;
6368 static void decode_rr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
6373 TCGv temp
, temp2
, temp3
;
6375 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6376 r3
= MASK_OP_RR_D(ctx
->opcode
);
6377 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6378 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6381 case OPC2_32_RR_BMERGE
:
6382 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6384 case OPC2_32_RR_BSPLIT
:
6385 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6387 case OPC2_32_RR_DVINIT_B
:
6388 gen_dvinit_b(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6391 case OPC2_32_RR_DVINIT_BU
:
6392 temp
= tcg_temp_new();
6393 temp2
= tcg_temp_new();
6394 temp3
= tcg_temp_new();
6396 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 8);
6398 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6399 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
6400 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6401 tcg_gen_neg_tl(temp
, temp3
);
6402 /* use cpu_PSW_AV to compare against 0 */
6403 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, temp3
, cpu_PSW_AV
,
6405 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
6406 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
6407 temp2
, cpu_gpr_d
[r2
]);
6408 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6410 /* overflow = (D[b] == 0) */
6411 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6413 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6415 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6417 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
6418 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6420 tcg_temp_free(temp
);
6421 tcg_temp_free(temp2
);
6422 tcg_temp_free(temp3
);
6424 case OPC2_32_RR_DVINIT_H
:
6425 gen_dvinit_h(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6428 case OPC2_32_RR_DVINIT_HU
:
6429 temp
= tcg_temp_new();
6430 temp2
= tcg_temp_new();
6431 temp3
= tcg_temp_new();
6433 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 16);
6435 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6436 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
6437 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6438 tcg_gen_neg_tl(temp
, temp3
);
6439 /* use cpu_PSW_AV to compare against 0 */
6440 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, temp3
, cpu_PSW_AV
,
6442 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
6443 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
6444 temp2
, cpu_gpr_d
[r2
]);
6445 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6447 /* overflow = (D[b] == 0) */
6448 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6450 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6452 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6454 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6455 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 16);
6456 tcg_temp_free(temp
);
6457 tcg_temp_free(temp2
);
6458 tcg_temp_free(temp3
);
6460 case OPC2_32_RR_DVINIT
:
6461 temp
= tcg_temp_new();
6462 temp2
= tcg_temp_new();
6463 /* overflow = ((D[b] == 0) ||
6464 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
6465 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
6466 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
6467 tcg_gen_and_tl(temp
, temp
, temp2
);
6468 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
6469 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
6470 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6472 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6474 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6476 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6477 /* sign extend to high reg */
6478 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
6479 tcg_temp_free(temp
);
6480 tcg_temp_free(temp2
);
6482 case OPC2_32_RR_DVINIT_U
:
6483 /* overflow = (D[b] == 0) */
6484 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6485 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6487 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6489 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6491 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6492 /* zero extend to high reg*/
6493 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
6495 case OPC2_32_RR_PARITY
:
6496 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6498 case OPC2_32_RR_UNPACK
:
6499 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6501 case OPC2_32_RR_CRC32
:
6502 if (tricore_feature(env
, TRICORE_FEATURE_161
)) {
6503 gen_helper_crc32(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6504 } /* TODO: else raise illegal opcode trap */
6506 case OPC2_32_RR_DIV
:
6507 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
6508 GEN_HELPER_RR(divide
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6510 } /* TODO: else raise illegal opcode trap */
6512 case OPC2_32_RR_DIV_U
:
6513 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
6514 GEN_HELPER_RR(divide_u
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
6515 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6516 } /* TODO: else raise illegal opcode trap */
6522 static void decode_rr1_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
6530 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6531 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6532 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6533 n
= tcg_const_i32(MASK_OP_RR1_N(ctx
->opcode
));
6534 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6537 case OPC2_32_RR1_MUL_H_32_LL
:
6538 temp64
= tcg_temp_new_i64();
6539 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6540 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6541 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6542 tcg_temp_free_i64(temp64
);
6544 case OPC2_32_RR1_MUL_H_32_LU
:
6545 temp64
= tcg_temp_new_i64();
6546 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6547 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6548 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6549 tcg_temp_free_i64(temp64
);
6551 case OPC2_32_RR1_MUL_H_32_UL
:
6552 temp64
= tcg_temp_new_i64();
6553 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6554 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6555 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6556 tcg_temp_free_i64(temp64
);
6558 case OPC2_32_RR1_MUL_H_32_UU
:
6559 temp64
= tcg_temp_new_i64();
6560 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6561 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6562 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6563 tcg_temp_free_i64(temp64
);
6565 case OPC2_32_RR1_MULM_H_64_LL
:
6566 temp64
= tcg_temp_new_i64();
6567 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6568 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6570 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6572 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6573 tcg_temp_free_i64(temp64
);
6575 case OPC2_32_RR1_MULM_H_64_LU
:
6576 temp64
= tcg_temp_new_i64();
6577 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6578 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6580 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6582 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6583 tcg_temp_free_i64(temp64
);
6585 case OPC2_32_RR1_MULM_H_64_UL
:
6586 temp64
= tcg_temp_new_i64();
6587 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6588 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6590 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6592 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6593 tcg_temp_free_i64(temp64
);
6595 case OPC2_32_RR1_MULM_H_64_UU
:
6596 temp64
= tcg_temp_new_i64();
6597 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6598 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6600 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6602 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6603 tcg_temp_free_i64(temp64
);
6606 case OPC2_32_RR1_MULR_H_16_LL
:
6607 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6608 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6610 case OPC2_32_RR1_MULR_H_16_LU
:
6611 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6612 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6614 case OPC2_32_RR1_MULR_H_16_UL
:
6615 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6616 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6618 case OPC2_32_RR1_MULR_H_16_UU
:
6619 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6620 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6626 static void decode_rr1_mulq(CPUTriCoreState
*env
, DisasContext
*ctx
)
6634 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6635 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6636 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6637 n
= MASK_OP_RR1_N(ctx
->opcode
);
6638 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6640 temp
= tcg_temp_new();
6641 temp2
= tcg_temp_new();
6644 case OPC2_32_RR1_MUL_Q_32
:
6645 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
6647 case OPC2_32_RR1_MUL_Q_64
:
6648 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6651 case OPC2_32_RR1_MUL_Q_32_L
:
6652 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6653 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6655 case OPC2_32_RR1_MUL_Q_64_L
:
6656 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6657 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6659 case OPC2_32_RR1_MUL_Q_32_U
:
6660 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6661 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6663 case OPC2_32_RR1_MUL_Q_64_U
:
6664 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6665 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6667 case OPC2_32_RR1_MUL_Q_32_LL
:
6668 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6669 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6670 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6672 case OPC2_32_RR1_MUL_Q_32_UU
:
6673 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6674 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6675 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6677 case OPC2_32_RR1_MULR_Q_32_L
:
6678 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6679 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6680 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6682 case OPC2_32_RR1_MULR_Q_32_U
:
6683 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6684 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6685 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6688 tcg_temp_free(temp
);
6689 tcg_temp_free(temp2
);
6693 static void decode_rr2_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
6698 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
6699 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
6700 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
6701 r3
= MASK_OP_RR2_D(ctx
->opcode
);
6703 case OPC2_32_RR2_MUL_32
:
6704 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6706 case OPC2_32_RR2_MUL_64
:
6707 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6710 case OPC2_32_RR2_MULS_32
:
6711 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6714 case OPC2_32_RR2_MUL_U_64
:
6715 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6718 case OPC2_32_RR2_MULS_U_32
:
6719 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6726 static void decode_rrpw_extract_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
6732 op2
= MASK_OP_RRPW_OP2(ctx
->opcode
);
6733 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
6734 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
6735 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
6736 pos
= MASK_OP_RRPW_POS(ctx
->opcode
);
6737 width
= MASK_OP_RRPW_WIDTH(ctx
->opcode
);
6740 case OPC2_32_RRPW_EXTR
:
6741 if (pos
+ width
<= 31) {
6742 /* optimize special cases */
6743 if ((pos
== 0) && (width
== 8)) {
6744 tcg_gen_ext8s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6745 } else if ((pos
== 0) && (width
== 16)) {
6746 tcg_gen_ext16s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6748 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 32 - pos
- width
);
6749 tcg_gen_sari_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 32 - width
);
6753 case OPC2_32_RRPW_EXTR_U
:
6755 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6757 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], pos
);
6758 tcg_gen_andi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], ~0u >> (32-width
));
6761 case OPC2_32_RRPW_IMASK
:
6762 if (pos
+ width
<= 31) {
6763 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], ((1u << width
) - 1) << pos
);
6764 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], pos
);
6767 case OPC2_32_RRPW_INSERT
:
6768 if (pos
+ width
<= 31) {
6769 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6777 static void decode_rrr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
6783 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6784 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6785 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6786 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6787 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6790 case OPC2_32_RRR_CADD
:
6791 gen_cond_add(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6792 cpu_gpr_d
[r4
], cpu_gpr_d
[r3
]);
6794 case OPC2_32_RRR_CADDN
:
6795 gen_cond_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6798 case OPC2_32_RRR_CSUB
:
6799 gen_cond_sub(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6802 case OPC2_32_RRR_CSUBN
:
6803 gen_cond_sub(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6806 case OPC2_32_RRR_SEL
:
6807 temp
= tcg_const_i32(0);
6808 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6809 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6810 tcg_temp_free(temp
);
6812 case OPC2_32_RRR_SELN
:
6813 temp
= tcg_const_i32(0);
6814 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6815 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6816 tcg_temp_free(temp
);
6821 static void decode_rrr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
6827 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6828 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6829 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6830 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6831 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6834 case OPC2_32_RRR_DVADJ
:
6835 GEN_HELPER_RRR(dvadj
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6836 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6838 case OPC2_32_RRR_DVSTEP
:
6839 GEN_HELPER_RRR(dvstep
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6840 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6842 case OPC2_32_RRR_DVSTEP_U
:
6843 GEN_HELPER_RRR(dvstep_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6844 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6846 case OPC2_32_RRR_IXMAX
:
6847 GEN_HELPER_RRR(ixmax
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6848 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6850 case OPC2_32_RRR_IXMAX_U
:
6851 GEN_HELPER_RRR(ixmax_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6852 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6854 case OPC2_32_RRR_IXMIN
:
6855 GEN_HELPER_RRR(ixmin
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6856 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6858 case OPC2_32_RRR_IXMIN_U
:
6859 GEN_HELPER_RRR(ixmin_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6860 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6862 case OPC2_32_RRR_PACK
:
6863 gen_helper_pack(cpu_gpr_d
[r4
], cpu_PSW_C
, cpu_gpr_d
[r3
],
6864 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6870 static void decode_rrr2_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
6873 uint32_t r1
, r2
, r3
, r4
;
6875 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6876 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6877 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6878 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6879 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6881 case OPC2_32_RRR2_MADD_32
:
6882 gen_madd32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6885 case OPC2_32_RRR2_MADD_64
:
6886 gen_madd64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6887 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6889 case OPC2_32_RRR2_MADDS_32
:
6890 gen_helper_madd32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6891 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6893 case OPC2_32_RRR2_MADDS_64
:
6894 gen_madds_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6895 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6897 case OPC2_32_RRR2_MADD_U_64
:
6898 gen_maddu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6899 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6901 case OPC2_32_RRR2_MADDS_U_32
:
6902 gen_helper_madd32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6903 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6905 case OPC2_32_RRR2_MADDS_U_64
:
6906 gen_maddsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6907 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6912 static void decode_rrr2_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
6915 uint32_t r1
, r2
, r3
, r4
;
6917 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6918 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6919 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6920 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6921 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6924 case OPC2_32_RRR2_MSUB_32
:
6925 gen_msub32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6928 case OPC2_32_RRR2_MSUB_64
:
6929 gen_msub64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6930 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6932 case OPC2_32_RRR2_MSUBS_32
:
6933 gen_helper_msub32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6934 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6936 case OPC2_32_RRR2_MSUBS_64
:
6937 gen_msubs_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6938 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6940 case OPC2_32_RRR2_MSUB_U_64
:
6941 gen_msubu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6942 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6944 case OPC2_32_RRR2_MSUBS_U_32
:
6945 gen_helper_msub32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6946 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6948 case OPC2_32_RRR2_MSUBS_U_64
:
6949 gen_msubsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6950 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6956 static void decode_rrr1_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
6959 uint32_t r1
, r2
, r3
, r4
, n
;
6961 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6962 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6963 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6964 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6965 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6966 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6969 case OPC2_32_RRR1_MADD_H_LL
:
6970 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6971 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6973 case OPC2_32_RRR1_MADD_H_LU
:
6974 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6975 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6977 case OPC2_32_RRR1_MADD_H_UL
:
6978 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6979 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6981 case OPC2_32_RRR1_MADD_H_UU
:
6982 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6983 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6985 case OPC2_32_RRR1_MADDS_H_LL
:
6986 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6987 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6989 case OPC2_32_RRR1_MADDS_H_LU
:
6990 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6991 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6993 case OPC2_32_RRR1_MADDS_H_UL
:
6994 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6995 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6997 case OPC2_32_RRR1_MADDS_H_UU
:
6998 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6999 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7001 case OPC2_32_RRR1_MADDM_H_LL
:
7002 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7003 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7005 case OPC2_32_RRR1_MADDM_H_LU
:
7006 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7007 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7009 case OPC2_32_RRR1_MADDM_H_UL
:
7010 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7011 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7013 case OPC2_32_RRR1_MADDM_H_UU
:
7014 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7015 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7017 case OPC2_32_RRR1_MADDMS_H_LL
:
7018 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7019 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7021 case OPC2_32_RRR1_MADDMS_H_LU
:
7022 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7023 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7025 case OPC2_32_RRR1_MADDMS_H_UL
:
7026 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7027 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7029 case OPC2_32_RRR1_MADDMS_H_UU
:
7030 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7031 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7033 case OPC2_32_RRR1_MADDR_H_LL
:
7034 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7035 cpu_gpr_d
[r2
], n
, MODE_LL
);
7037 case OPC2_32_RRR1_MADDR_H_LU
:
7038 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7039 cpu_gpr_d
[r2
], n
, MODE_LU
);
7041 case OPC2_32_RRR1_MADDR_H_UL
:
7042 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7043 cpu_gpr_d
[r2
], n
, MODE_UL
);
7045 case OPC2_32_RRR1_MADDR_H_UU
:
7046 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7047 cpu_gpr_d
[r2
], n
, MODE_UU
);
7049 case OPC2_32_RRR1_MADDRS_H_LL
:
7050 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7051 cpu_gpr_d
[r2
], n
, MODE_LL
);
7053 case OPC2_32_RRR1_MADDRS_H_LU
:
7054 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7055 cpu_gpr_d
[r2
], n
, MODE_LU
);
7057 case OPC2_32_RRR1_MADDRS_H_UL
:
7058 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7059 cpu_gpr_d
[r2
], n
, MODE_UL
);
7061 case OPC2_32_RRR1_MADDRS_H_UU
:
7062 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7063 cpu_gpr_d
[r2
], n
, MODE_UU
);
7068 static void decode_rrr1_maddq_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7071 uint32_t r1
, r2
, r3
, r4
, n
;
7074 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7075 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7076 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7077 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7078 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7079 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7081 temp
= tcg_const_i32(n
);
7082 temp2
= tcg_temp_new();
7085 case OPC2_32_RRR1_MADD_Q_32
:
7086 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7087 cpu_gpr_d
[r2
], n
, 32, env
);
7089 case OPC2_32_RRR1_MADD_Q_64
:
7090 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7091 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7094 case OPC2_32_RRR1_MADD_Q_32_L
:
7095 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7096 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7099 case OPC2_32_RRR1_MADD_Q_64_L
:
7100 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7101 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7102 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7105 case OPC2_32_RRR1_MADD_Q_32_U
:
7106 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7107 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7110 case OPC2_32_RRR1_MADD_Q_64_U
:
7111 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7112 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7113 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7116 case OPC2_32_RRR1_MADD_Q_32_LL
:
7117 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7118 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7119 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7121 case OPC2_32_RRR1_MADD_Q_64_LL
:
7122 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7123 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7124 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7125 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7127 case OPC2_32_RRR1_MADD_Q_32_UU
:
7128 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7129 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7130 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7132 case OPC2_32_RRR1_MADD_Q_64_UU
:
7133 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7134 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7135 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7136 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7138 case OPC2_32_RRR1_MADDS_Q_32
:
7139 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7140 cpu_gpr_d
[r2
], n
, 32);
7142 case OPC2_32_RRR1_MADDS_Q_64
:
7143 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7144 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7147 case OPC2_32_RRR1_MADDS_Q_32_L
:
7148 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7149 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7152 case OPC2_32_RRR1_MADDS_Q_64_L
:
7153 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7154 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7155 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7158 case OPC2_32_RRR1_MADDS_Q_32_U
:
7159 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7160 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7163 case OPC2_32_RRR1_MADDS_Q_64_U
:
7164 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7165 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7166 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7169 case OPC2_32_RRR1_MADDS_Q_32_LL
:
7170 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7171 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7172 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7174 case OPC2_32_RRR1_MADDS_Q_64_LL
:
7175 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7176 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7177 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7178 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7180 case OPC2_32_RRR1_MADDS_Q_32_UU
:
7181 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7182 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7183 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7185 case OPC2_32_RRR1_MADDS_Q_64_UU
:
7186 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7187 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7188 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7189 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7191 case OPC2_32_RRR1_MADDR_H_64_UL
:
7192 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7193 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7195 case OPC2_32_RRR1_MADDRS_H_64_UL
:
7196 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7197 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7199 case OPC2_32_RRR1_MADDR_Q_32_LL
:
7200 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7201 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7202 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7204 case OPC2_32_RRR1_MADDR_Q_32_UU
:
7205 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7206 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7207 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7209 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
7210 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7211 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7212 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7214 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
7215 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7216 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7217 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7220 tcg_temp_free(temp
);
7221 tcg_temp_free(temp2
);
7224 static void decode_rrr1_maddsu_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7227 uint32_t r1
, r2
, r3
, r4
, n
;
7229 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7230 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7231 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7232 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7233 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7234 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7237 case OPC2_32_RRR1_MADDSU_H_32_LL
:
7238 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7239 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7241 case OPC2_32_RRR1_MADDSU_H_32_LU
:
7242 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7243 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7245 case OPC2_32_RRR1_MADDSU_H_32_UL
:
7246 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7247 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7249 case OPC2_32_RRR1_MADDSU_H_32_UU
:
7250 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7251 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7253 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
7254 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7255 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7258 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
7259 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7260 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7263 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
7264 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7265 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7268 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
7269 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7270 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7273 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
7274 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7275 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7278 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
7279 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7280 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7283 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
7284 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7285 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7288 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
7289 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7290 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7293 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
7294 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7295 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7298 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
7299 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7300 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7303 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
7304 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7305 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7308 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
7309 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7310 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7313 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
7314 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7315 cpu_gpr_d
[r2
], n
, MODE_LL
);
7317 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
7318 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7319 cpu_gpr_d
[r2
], n
, MODE_LU
);
7321 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
7322 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7323 cpu_gpr_d
[r2
], n
, MODE_UL
);
7325 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
7326 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7327 cpu_gpr_d
[r2
], n
, MODE_UU
);
7329 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
7330 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7331 cpu_gpr_d
[r2
], n
, MODE_LL
);
7333 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
7334 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7335 cpu_gpr_d
[r2
], n
, MODE_LU
);
7337 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
7338 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7339 cpu_gpr_d
[r2
], n
, MODE_UL
);
7341 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
7342 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7343 cpu_gpr_d
[r2
], n
, MODE_UU
);
7348 static void decode_rrr1_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
7351 uint32_t r1
, r2
, r3
, r4
, n
;
7353 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7354 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7355 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7356 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7357 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7358 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7361 case OPC2_32_RRR1_MSUB_H_LL
:
7362 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7363 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7365 case OPC2_32_RRR1_MSUB_H_LU
:
7366 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7367 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7369 case OPC2_32_RRR1_MSUB_H_UL
:
7370 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7371 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7373 case OPC2_32_RRR1_MSUB_H_UU
:
7374 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7375 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7377 case OPC2_32_RRR1_MSUBS_H_LL
:
7378 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7379 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7381 case OPC2_32_RRR1_MSUBS_H_LU
:
7382 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7383 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7385 case OPC2_32_RRR1_MSUBS_H_UL
:
7386 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7387 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7389 case OPC2_32_RRR1_MSUBS_H_UU
:
7390 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7391 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7393 case OPC2_32_RRR1_MSUBM_H_LL
:
7394 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7395 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7397 case OPC2_32_RRR1_MSUBM_H_LU
:
7398 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7399 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7401 case OPC2_32_RRR1_MSUBM_H_UL
:
7402 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7403 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7405 case OPC2_32_RRR1_MSUBM_H_UU
:
7406 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7407 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7409 case OPC2_32_RRR1_MSUBMS_H_LL
:
7410 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7411 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7413 case OPC2_32_RRR1_MSUBMS_H_LU
:
7414 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7415 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7417 case OPC2_32_RRR1_MSUBMS_H_UL
:
7418 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7419 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7421 case OPC2_32_RRR1_MSUBMS_H_UU
:
7422 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7423 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7425 case OPC2_32_RRR1_MSUBR_H_LL
:
7426 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7427 cpu_gpr_d
[r2
], n
, MODE_LL
);
7429 case OPC2_32_RRR1_MSUBR_H_LU
:
7430 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7431 cpu_gpr_d
[r2
], n
, MODE_LU
);
7433 case OPC2_32_RRR1_MSUBR_H_UL
:
7434 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7435 cpu_gpr_d
[r2
], n
, MODE_UL
);
7437 case OPC2_32_RRR1_MSUBR_H_UU
:
7438 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7439 cpu_gpr_d
[r2
], n
, MODE_UU
);
7441 case OPC2_32_RRR1_MSUBRS_H_LL
:
7442 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7443 cpu_gpr_d
[r2
], n
, MODE_LL
);
7445 case OPC2_32_RRR1_MSUBRS_H_LU
:
7446 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7447 cpu_gpr_d
[r2
], n
, MODE_LU
);
7449 case OPC2_32_RRR1_MSUBRS_H_UL
:
7450 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7451 cpu_gpr_d
[r2
], n
, MODE_UL
);
7453 case OPC2_32_RRR1_MSUBRS_H_UU
:
7454 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7455 cpu_gpr_d
[r2
], n
, MODE_UU
);
7460 static void decode_rrr1_msubq_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7463 uint32_t r1
, r2
, r3
, r4
, n
;
7466 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7467 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7468 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7469 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7470 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7471 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7473 temp
= tcg_const_i32(n
);
7474 temp2
= tcg_temp_new();
7477 case OPC2_32_RRR1_MSUB_Q_32
:
7478 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7479 cpu_gpr_d
[r2
], n
, 32, env
);
7481 case OPC2_32_RRR1_MSUB_Q_64
:
7482 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7483 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7486 case OPC2_32_RRR1_MSUB_Q_32_L
:
7487 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7488 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7491 case OPC2_32_RRR1_MSUB_Q_64_L
:
7492 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7493 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7494 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7497 case OPC2_32_RRR1_MSUB_Q_32_U
:
7498 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7499 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7502 case OPC2_32_RRR1_MSUB_Q_64_U
:
7503 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7504 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7505 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7508 case OPC2_32_RRR1_MSUB_Q_32_LL
:
7509 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7510 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7511 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7513 case OPC2_32_RRR1_MSUB_Q_64_LL
:
7514 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7515 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7516 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7517 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7519 case OPC2_32_RRR1_MSUB_Q_32_UU
:
7520 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7521 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7522 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7524 case OPC2_32_RRR1_MSUB_Q_64_UU
:
7525 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7526 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7527 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7528 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7530 case OPC2_32_RRR1_MSUBS_Q_32
:
7531 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7532 cpu_gpr_d
[r2
], n
, 32);
7534 case OPC2_32_RRR1_MSUBS_Q_64
:
7535 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7536 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7539 case OPC2_32_RRR1_MSUBS_Q_32_L
:
7540 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7541 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7544 case OPC2_32_RRR1_MSUBS_Q_64_L
:
7545 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7546 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7547 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7550 case OPC2_32_RRR1_MSUBS_Q_32_U
:
7551 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7552 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7555 case OPC2_32_RRR1_MSUBS_Q_64_U
:
7556 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7557 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7558 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7561 case OPC2_32_RRR1_MSUBS_Q_32_LL
:
7562 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7563 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7564 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7566 case OPC2_32_RRR1_MSUBS_Q_64_LL
:
7567 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7568 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7569 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7570 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7572 case OPC2_32_RRR1_MSUBS_Q_32_UU
:
7573 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7574 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7575 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7577 case OPC2_32_RRR1_MSUBS_Q_64_UU
:
7578 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7579 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7580 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7581 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7583 case OPC2_32_RRR1_MSUBR_H_64_UL
:
7584 gen_msubr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7585 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7587 case OPC2_32_RRR1_MSUBRS_H_64_UL
:
7588 gen_msubr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7589 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7591 case OPC2_32_RRR1_MSUBR_Q_32_LL
:
7592 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7593 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7594 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7596 case OPC2_32_RRR1_MSUBR_Q_32_UU
:
7597 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7598 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7599 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7601 case OPC2_32_RRR1_MSUBRS_Q_32_LL
:
7602 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7603 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7604 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7606 case OPC2_32_RRR1_MSUBRS_Q_32_UU
:
7607 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7608 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7609 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7612 tcg_temp_free(temp
);
7613 tcg_temp_free(temp2
);
7616 static void decode_rrr1_msubad_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7619 uint32_t r1
, r2
, r3
, r4
, n
;
7621 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7622 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7623 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7624 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7625 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7626 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7629 case OPC2_32_RRR1_MSUBAD_H_32_LL
:
7630 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7631 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7633 case OPC2_32_RRR1_MSUBAD_H_32_LU
:
7634 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7635 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7637 case OPC2_32_RRR1_MSUBAD_H_32_UL
:
7638 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7639 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7641 case OPC2_32_RRR1_MSUBAD_H_32_UU
:
7642 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7643 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7645 case OPC2_32_RRR1_MSUBADS_H_32_LL
:
7646 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7647 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7650 case OPC2_32_RRR1_MSUBADS_H_32_LU
:
7651 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7652 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7655 case OPC2_32_RRR1_MSUBADS_H_32_UL
:
7656 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7657 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7660 case OPC2_32_RRR1_MSUBADS_H_32_UU
:
7661 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7662 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7665 case OPC2_32_RRR1_MSUBADM_H_64_LL
:
7666 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7667 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7670 case OPC2_32_RRR1_MSUBADM_H_64_LU
:
7671 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7672 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7675 case OPC2_32_RRR1_MSUBADM_H_64_UL
:
7676 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7677 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7680 case OPC2_32_RRR1_MSUBADM_H_64_UU
:
7681 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7682 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7685 case OPC2_32_RRR1_MSUBADMS_H_64_LL
:
7686 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7687 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7690 case OPC2_32_RRR1_MSUBADMS_H_64_LU
:
7691 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7692 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7695 case OPC2_32_RRR1_MSUBADMS_H_64_UL
:
7696 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7697 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7700 case OPC2_32_RRR1_MSUBADMS_H_64_UU
:
7701 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7702 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7705 case OPC2_32_RRR1_MSUBADR_H_16_LL
:
7706 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7707 cpu_gpr_d
[r2
], n
, MODE_LL
);
7709 case OPC2_32_RRR1_MSUBADR_H_16_LU
:
7710 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7711 cpu_gpr_d
[r2
], n
, MODE_LU
);
7713 case OPC2_32_RRR1_MSUBADR_H_16_UL
:
7714 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7715 cpu_gpr_d
[r2
], n
, MODE_UL
);
7717 case OPC2_32_RRR1_MSUBADR_H_16_UU
:
7718 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7719 cpu_gpr_d
[r2
], n
, MODE_UU
);
7721 case OPC2_32_RRR1_MSUBADRS_H_16_LL
:
7722 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7723 cpu_gpr_d
[r2
], n
, MODE_LL
);
7725 case OPC2_32_RRR1_MSUBADRS_H_16_LU
:
7726 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7727 cpu_gpr_d
[r2
], n
, MODE_LU
);
7729 case OPC2_32_RRR1_MSUBADRS_H_16_UL
:
7730 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7731 cpu_gpr_d
[r2
], n
, MODE_UL
);
7733 case OPC2_32_RRR1_MSUBADRS_H_16_UU
:
7734 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7735 cpu_gpr_d
[r2
], n
, MODE_UU
);
7741 static void decode_rrrr_extract_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
7745 TCGv tmp_width
, tmp_pos
;
7747 r1
= MASK_OP_RRRR_S1(ctx
->opcode
);
7748 r2
= MASK_OP_RRRR_S2(ctx
->opcode
);
7749 r3
= MASK_OP_RRRR_S3(ctx
->opcode
);
7750 r4
= MASK_OP_RRRR_D(ctx
->opcode
);
7751 op2
= MASK_OP_RRRR_OP2(ctx
->opcode
);
7753 tmp_pos
= tcg_temp_new();
7754 tmp_width
= tcg_temp_new();
7757 case OPC2_32_RRRR_DEXTR
:
7758 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7760 tcg_gen_rotl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7762 tcg_gen_shl_tl(tmp_width
, cpu_gpr_d
[r1
], tmp_pos
);
7763 tcg_gen_subfi_tl(tmp_pos
, 32, tmp_pos
);
7764 tcg_gen_shr_tl(tmp_pos
, cpu_gpr_d
[r2
], tmp_pos
);
7765 tcg_gen_or_tl(cpu_gpr_d
[r4
], tmp_width
, tmp_pos
);
7768 case OPC2_32_RRRR_EXTR
:
7769 case OPC2_32_RRRR_EXTR_U
:
7770 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7771 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7772 tcg_gen_add_tl(tmp_pos
, tmp_pos
, tmp_width
);
7773 tcg_gen_subfi_tl(tmp_pos
, 32, tmp_pos
);
7774 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7775 tcg_gen_subfi_tl(tmp_width
, 32, tmp_width
);
7776 if (op2
== OPC2_32_RRRR_EXTR
) {
7777 tcg_gen_sar_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7779 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7782 case OPC2_32_RRRR_INSERT
:
7783 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7784 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7785 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], tmp_width
,
7789 tcg_temp_free(tmp_pos
);
7790 tcg_temp_free(tmp_width
);
7794 static void decode_rrrw_extract_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
7802 op2
= MASK_OP_RRRW_OP2(ctx
->opcode
);
7803 r1
= MASK_OP_RRRW_S1(ctx
->opcode
);
7804 r2
= MASK_OP_RRRW_S2(ctx
->opcode
);
7805 r3
= MASK_OP_RRRW_S3(ctx
->opcode
);
7806 r4
= MASK_OP_RRRW_D(ctx
->opcode
);
7807 width
= MASK_OP_RRRW_WIDTH(ctx
->opcode
);
7809 temp
= tcg_temp_new();
7812 case OPC2_32_RRRW_EXTR
:
7813 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7814 tcg_gen_addi_tl(temp
, temp
, width
);
7815 tcg_gen_subfi_tl(temp
, 32, temp
);
7816 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7817 tcg_gen_sari_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], 32 - width
);
7819 case OPC2_32_RRRW_EXTR_U
:
7821 tcg_gen_movi_tl(cpu_gpr_d
[r4
], 0);
7823 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7824 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7825 tcg_gen_andi_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], ~0u >> (32-width
));
7828 case OPC2_32_RRRW_IMASK
:
7829 temp2
= tcg_temp_new();
7831 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7832 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
7833 tcg_gen_shl_tl(temp2
, temp2
, temp
);
7834 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r2
], temp
);
7835 tcg_gen_mov_tl(cpu_gpr_d
[r4
+1], temp2
);
7837 tcg_temp_free(temp2
);
7839 case OPC2_32_RRRW_INSERT
:
7840 temp2
= tcg_temp_new();
7842 tcg_gen_movi_tl(temp
, width
);
7843 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
], 0x1f);
7844 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], temp
, temp2
);
7846 tcg_temp_free(temp2
);
7849 tcg_temp_free(temp
);
/*
 * Decode SYS-format system instructions: debug entry, global interrupt
 * enable/disable, synchronisation barriers, returns (RET/FRET/RFE/RFM),
 * context save/restore and trap placeholders.
 */
static void decode_sys_interrupts(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1;
    TCGLabel *l1;
    TCGv tmp;

    op2 = MASK_OP_SYS_OP2(ctx->opcode);
    r1 = MASK_OP_SYS_S1D(ctx->opcode);

    switch (op2) {
    case OPC2_32_SYS_DEBUG:
        /* raise EXCP_DEBUG */
        break;
    case OPC2_32_SYS_DISABLE:
        /* Clear ICR.IE: globally disable interrupts. */
        tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~MASK_ICR_IE);
        break;
    case OPC2_32_SYS_DSYNC:
        /* Data synchronisation: nothing to do under TCG. */
        break;
    case OPC2_32_SYS_ENABLE:
        /* Set ICR.IE: globally enable interrupts. */
        tcg_gen_ori_tl(cpu_ICR, cpu_ICR, MASK_ICR_IE);
        break;
    case OPC2_32_SYS_ISYNC:
        /* Instruction synchronisation: nothing to do under TCG. */
        break;
    case OPC2_32_SYS_NOP:
        break;
    case OPC2_32_SYS_RET:
        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
        break;
    case OPC2_32_SYS_FRET:
        /* NOTE(review): body lost in extraction; presumably the fast
           call-return helper — confirm against upstream. */
        gen_fret(ctx);
        break;
    case OPC2_32_SYS_RFE:
        gen_helper_rfe(cpu_env);
        tcg_gen_exit_tb(0);
        ctx->bstate = BS_BRANCH;
        break;
    case OPC2_32_SYS_RFM:
        /* RFM is only valid in supervisor mode. */
        if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
            tmp = tcg_temp_new();
            l1 = gen_new_label();

            /* Only perform the return when DBGSR.DE == 1, i.e. debug
               mode was actually entered. */
            tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR));
            tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
            tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
            gen_helper_rfm(cpu_env);
            gen_set_label(l1);
            tcg_gen_exit_tb(0);
            ctx->bstate = BS_BRANCH;
            tcg_temp_free(tmp);
        } else {
            /* generate privilege trap */
        }
        break;
    case OPC2_32_SYS_RSLCX:
        gen_helper_rslcx(cpu_env);
        break;
    case OPC2_32_SYS_SVLCX:
        gen_helper_svlcx(cpu_env);
        break;
    case OPC2_32_SYS_RESTORE:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            /* Allowed in supervisor mode or user-1 mode only. */
            if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM ||
                (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) {
                /* Deposit one bit of D[r1] at ICR bit 8 — presumably
                   ICR.IE; verify against the ICR register layout. */
                tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], 8, 1);
            } /* else raise privilege trap */
        } /* else raise illegal opcode trap */
        break;
    case OPC2_32_SYS_TRAPSV:
        /* TODO: raise sticky overflow trap */
        break;
    case OPC2_32_SYS_TRAPV:
        /* TODO: raise overflow trap */
        break;
    }
}
/*
 * Decode one 32-bit wide instruction.  The major opcode (op1) selects
 * the instruction format; most format groups are dispatched to their
 * per-format decode_* helper, a few simple ones are translated inline.
 */
static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
{
    int op1;
    int32_t r1, r2, r3;
    int32_t address, const16;
    int8_t b, const4;
    int32_t bpos;
    TCGv temp, temp2, temp3;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle JNZ.T opcode only being 7 bit long */
    if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
        op1 = OPCM_32_BRN_JTT;
    }

    switch (op1) {
/* ABS-format */
    case OPCM_32_ABS_LDW:
        decode_abs_ldw(env, ctx);
        break;
    case OPCM_32_ABS_LDB:
        decode_abs_ldb(env, ctx);
        break;
    case OPCM_32_ABS_LDMST_SWAP:
        decode_abs_ldst_swap(env, ctx);
        break;
    case OPCM_32_ABS_LDST_CONTEXT:
        decode_abs_ldst_context(env, ctx);
        break;
    case OPCM_32_ABS_STORE:
        decode_abs_store(env, ctx);
        break;
    case OPCM_32_ABS_STOREB_H:
        decode_abs_storeb_h(env, ctx);
        break;
    case OPC1_32_ABS_STOREQ:
        /* ST.Q: store the upper halfword of D[r1] to an absolute EA. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);

        tcg_temp_free(temp2);
        tcg_temp_free(temp);
        break;
    case OPC1_32_ABS_LD_Q:
        /* LD.Q: load a halfword into the upper half of D[r1]. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));

        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);

        tcg_temp_free(temp);
        break;
    case OPC1_32_ABS_LEA:
        /* LEA: load the absolute effective address into A[r1]. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
        break;
/* ABSB-format */
    case OPC1_32_ABSB_ST_T:
        /* ST.T: read-modify-write a single bit (bpos) of a byte in
           memory, setting it to the immediate bit b. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        b = MASK_OP_ABSB_B(ctx->opcode);
        bpos = MASK_OP_ABSB_BPOS(ctx->opcode);

        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
        tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
        tcg_gen_ori_tl(temp2, temp2, (b << bpos));
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
/* B-format */
    case OPC1_32_B_CALL:
    case OPC1_32_B_CALLA:
    case OPC1_32_B_FCALL:
    case OPC1_32_B_FCALLA:
    case OPC1_32_B_J:
    case OPC1_32_B_JA:
    case OPC1_32_B_JL:
    case OPC1_32_B_JLA:
        address = MASK_OP_B_DISP24_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, 0, address);
        break;
/* Bit-format */
    case OPCM_32_BIT_ANDACC:
        decode_bit_andacc(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T1:
        decode_bit_logical_t(env, ctx);
        break;
    case OPCM_32_BIT_INSERT:
        decode_bit_insert(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T2:
        decode_bit_logical_t2(env, ctx);
        break;
    case OPCM_32_BIT_ORAND:
        decode_bit_orand(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC1:
        decode_bit_sh_logic1(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC2:
        decode_bit_sh_logic2(env, ctx);
        break;
/* BO-format */
    case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
        decode_bo_addrmode_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
        decode_bo_addrmode_ld_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ld_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
        decode_bo_addrmode_stctx_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx);
        break;
/* BOL-format */
    case OPC1_32_BOL_LD_A_LONGOFF:
    case OPC1_32_BOL_LD_W_LONGOFF:
    case OPC1_32_BOL_LEA_LONGOFF:
    case OPC1_32_BOL_ST_W_LONGOFF:
    case OPC1_32_BOL_ST_A_LONGOFF:
    case OPC1_32_BOL_LD_B_LONGOFF:
    case OPC1_32_BOL_LD_BU_LONGOFF:
    case OPC1_32_BOL_LD_H_LONGOFF:
    case OPC1_32_BOL_LD_HU_LONGOFF:
    case OPC1_32_BOL_ST_B_LONGOFF:
    case OPC1_32_BOL_ST_H_LONGOFF:
        decode_bol_opc(env, ctx, op1);
        break;
/* BRC Format */
    case OPCM_32_BRC_EQ_NEQ:
    case OPCM_32_BRC_GE:
    case OPCM_32_BRC_JLT:
    case OPCM_32_BRC_JNE:
        const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
        address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRC_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, const4, address);
        break;
/* BRN Format */
    case OPCM_32_BRN_JTT:
        address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRN_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, address);
        break;
/* BRR Format */
    case OPCM_32_BRR_EQ_NEQ:
    case OPCM_32_BRR_ADDR_EQ_NEQ:
    case OPCM_32_BRR_GE:
    case OPCM_32_BRR_JLT:
    case OPCM_32_BRR_JNE:
    case OPCM_32_BRR_JNZ:
    case OPCM_32_BRR_LOOP:
        address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
        r2 = MASK_OP_BRR_S2(ctx->opcode);
        r1 = MASK_OP_BRR_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, r2, 0, address);
        break;
/* RC Format */
    case OPCM_32_RC_LOGICAL_SHIFT:
        decode_rc_logical_shift(env, ctx);
        break;
    case OPCM_32_RC_ACCUMULATOR:
        decode_rc_accumulator(env, ctx);
        break;
    case OPCM_32_RC_SERVICEROUTINE:
        decode_rc_serviceroutine(env, ctx);
        break;
    case OPCM_32_RC_MUL:
        decode_rc_mul(env, ctx);
        break;
/* RCPW Format */
    case OPCM_32_RCPW_MASK_INSERT:
        decode_rcpw_insert(env, ctx);
        break;
/* RCRR Format */
    case OPC1_32_RCRR_INSERT:
        /* INSERT with immediate value and pos/width from register pair
           D[r3+1]:D[r3]. */
        r1 = MASK_OP_RCRR_S1(ctx->opcode);
        r2 = MASK_OP_RCRR_S3(ctx->opcode);
        r3 = MASK_OP_RCRR_D(ctx->opcode);
        const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
        temp = tcg_const_i32(const16);
        temp2 = tcg_temp_new(); /* width*/
        temp3 = tcg_temp_new(); /* pos */

        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);

        gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        tcg_temp_free(temp3);
        break;
/* RCRW Format */
    case OPCM_32_RCRW_MASK_INSERT:
        decode_rcrw_insert(env, ctx);
        break;
/* RCR Format */
    case OPCM_32_RCR_COND_SELECT:
        decode_rcr_cond_select(env, ctx);
        break;
    case OPCM_32_RCR_MADD:
        decode_rcr_madd(env, ctx);
        break;
    case OPCM_32_RCR_MSUB:
        decode_rcr_msub(env, ctx);
        break;
/* RLC Format */
    case OPC1_32_RLC_ADDI:
    case OPC1_32_RLC_ADDIH:
    case OPC1_32_RLC_ADDIH_A:
    case OPC1_32_RLC_MFCR:
    case OPC1_32_RLC_MOV:
    case OPC1_32_RLC_MOV_64:
    case OPC1_32_RLC_MOV_U:
    case OPC1_32_RLC_MOV_H:
    case OPC1_32_RLC_MOVH_A:
    case OPC1_32_RLC_MTCR:
        decode_rlc_opc(env, ctx, op1);
        break;
/* RR Format */
    case OPCM_32_RR_ACCUMULATOR:
        decode_rr_accumulator(env, ctx);
        break;
    case OPCM_32_RR_LOGICAL_SHIFT:
        decode_rr_logical_shift(env, ctx);
        break;
    case OPCM_32_RR_ADDRESS:
        decode_rr_address(env, ctx);
        break;
    case OPCM_32_RR_IDIRECT:
        decode_rr_idirect(env, ctx);
        break;
    case OPCM_32_RR_DIVIDE:
        decode_rr_divide(env, ctx);
        break;
/* RR1 Format */
    case OPCM_32_RR1_MUL:
        decode_rr1_mul(env, ctx);
        break;
    case OPCM_32_RR1_MULQ:
        decode_rr1_mulq(env, ctx);
        break;
/* RR2 format */
    case OPCM_32_RR2_MUL:
        decode_rr2_mul(env, ctx);
        break;
/* RRPW format */
    case OPCM_32_RRPW_EXTRACT_INSERT:
        decode_rrpw_extract_insert(env, ctx);
        break;
    case OPC1_32_RRPW_DEXTR:
        /* DEXTR with immediate position: extract 32 bits from the
           concatenation D[r1]:D[r2] starting at bit const16. */
        r1 = MASK_OP_RRPW_S1(ctx->opcode);
        r2 = MASK_OP_RRPW_S2(ctx->opcode);
        r3 = MASK_OP_RRPW_D(ctx->opcode);
        const16 = MASK_OP_RRPW_POS(ctx->opcode);
        if (r1 == r2) {
            /* Same source twice: the concatenated shift is a rotate. */
            tcg_gen_rotli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], const16);
        } else {
            temp = tcg_temp_new();
            tcg_gen_shli_tl(temp, cpu_gpr_d[r1], const16);
            tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], 32 - const16);
            tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
            tcg_temp_free(temp);
        }
        break;
/* RRR Format */
    case OPCM_32_RRR_COND_SELECT:
        decode_rrr_cond_select(env, ctx);
        break;
    case OPCM_32_RRR_DIVIDE:
        decode_rrr_divide(env, ctx);
        break;
/* RRR2 Format */
    case OPCM_32_RRR2_MADD:
        decode_rrr2_madd(env, ctx);
        break;
    case OPCM_32_RRR2_MSUB:
        decode_rrr2_msub(env, ctx);
        break;
/* RRR1 format */
    case OPCM_32_RRR1_MADD:
        decode_rrr1_madd(env, ctx);
        break;
    case OPCM_32_RRR1_MADDQ_H:
        decode_rrr1_maddq_h(env, ctx);
        break;
    case OPCM_32_RRR1_MADDSU_H:
        decode_rrr1_maddsu_h(env, ctx);
        break;
    case OPCM_32_RRR1_MSUB_H:
        decode_rrr1_msub(env, ctx);
        break;
    case OPCM_32_RRR1_MSUB_Q:
        decode_rrr1_msubq_h(env, ctx);
        break;
    case OPCM_32_RRR1_MSUBAD_H:
        decode_rrr1_msubad_h(env, ctx);
        break;
/* RRRR format */
    case OPCM_32_RRRR_EXTRACT_INSERT:
        decode_rrrr_extract_insert(env, ctx);
        break;
/* RRRW format */
    case OPCM_32_RRRW_EXTRACT_INSERT:
        decode_rrrw_extract_insert(env, ctx);
        break;
/* SYS format */
    case OPCM_32_SYS_INTERRUPTS:
        decode_sys_interrupts(env, ctx);
        break;
    case OPC1_32_SYS_RSTV:
        /* RSTV: clear PSW.V and the cached SV/AV/SAV overflow flags. */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
        break;
    }
}
8267 static void decode_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int *is_branch
)
8269 /* 16-Bit Instruction */
8270 if ((ctx
->opcode
& 0x1) == 0) {
8271 ctx
->next_pc
= ctx
->pc
+ 2;
8272 decode_16Bit_opc(env
, ctx
);
8273 /* 32-Bit Instruction */
8275 ctx
->next_pc
= ctx
->pc
+ 4;
8276 decode_32Bit_opc(env
, ctx
);
/*
 * Translate a block of guest code starting at tb->pc into TCG ops.
 * Translation stops when a decoder ends the block (ctx.bstate leaves
 * BS_NONE), when the TCG op buffer fills up, or when single-stepping
 * limits the TB to one instruction.
 * NOTE(review): several lines of this function were lost in extraction;
 * the control flow below follows the visible statements — verify the
 * reconstructed parts against the upstream file.
 */
static inline void
gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUTriCoreState *env = &cpu->env;
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns = 0;

    if (search_pc) {
        qemu_log("search pc %d\n", search_pc);
    }

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.saved_pc = -1;
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.bstate = BS_NONE;
    ctx.mem_idx = cpu_mmu_index(env);

    tcg_clear_temp_count();
    gen_tb_start(tb);
    while (ctx.bstate == BS_NONE) {
        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx, 0);

        num_insns++;

        /* Stop when the TCG op buffer is (nearly) full; resume at
           next_pc in a fresh TB. */
        if (tcg_op_buf_full()) {
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        if (ctx.singlestep_enabled) {
            /* One instruction per TB while single-stepping. */
            gen_save_pc(ctx.next_pc);
            break;
        }
        ctx.pc = ctx.next_pc;
    }

    gen_tb_end(tb, num_insns);
    if (search_pc) {
        printf("done_generating search pc\n");
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
    /* Diagnose TCG temporaries leaked by the decoders above. */
    if (tcg_check_temp_count()) {
        printf("LEAK at %08x\n", env->PC);
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
/* Public entry point: translate a TB for execution (no PC search). */
void
gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
}
/* Public entry point: re-translate a TB in search-pc mode, used to
   recover the guest PC for a host PC inside the TB. */
void
gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
}
/* Restore the guest PC recorded for TCG op index pc_pos (filled in
   during a search-pc translation pass). */
void
restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos)
{
    env->PC = tcg_ctx.gen_opc_pc[pc_pos];
}
void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
    /* NOTE(review): 0xb80 is presumably the architectural PSW reset
       value — confirm against the TriCore architecture manual. */
    env->PSW = 0xb80;
}
/* Allocate TCG globals backed by the core special function registers
   of CPUTriCoreState (PCXI, PSW, PC, ICR). */
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUTriCoreState, ICR), "ICR");
}
8385 void tricore_tcg_init(void)
8392 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
8394 for (i
= 0 ; i
< 16 ; i
++) {
8395 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
8396 offsetof(CPUTriCoreState
, gpr_a
[i
]),
8399 for (i
= 0 ; i
< 16 ; i
++) {
8400 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
8401 offsetof(CPUTriCoreState
, gpr_d
[i
]),
8404 tricore_tcg_init_csfr();
8405 /* init PSW flag cache */
8406 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
8407 offsetof(CPUTriCoreState
, PSW_USB_C
),
8409 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
8410 offsetof(CPUTriCoreState
, PSW_USB_V
),
8412 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
8413 offsetof(CPUTriCoreState
, PSW_USB_SV
),
8415 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
8416 offsetof(CPUTriCoreState
, PSW_USB_AV
),
8418 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
8419 offsetof(CPUTriCoreState
, PSW_USB_SAV
),