/*
 *  TriCore emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "tricore-opcodes.h"
static TCGv cpu_gpr_a[16];
static TCGv cpu_gpr_d[16];
static TCGv cpu_PSW_C;
static TCGv cpu_PSW_V;
static TCGv cpu_PSW_SV;
static TCGv cpu_PSW_AV;
static TCGv cpu_PSW_SAV;

static TCGv_ptr cpu_env;

#include "exec/gen-icount.h"
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp"  , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };

static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc, saved_pc, next_pc;
    int singlestep_enabled;
    /* Routine used to access memory */
    int mem_idx;
    uint32_t hflags, saved_hflags;
} DisasContext;
void tricore_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    TriCoreCPU *cpu = TRICORE_CPU(cs);
    CPUTriCoreState *env = &cpu->env;
    uint32_t psw = psw_read(env);
    int i;

    cpu_fprintf(f, "PC: " TARGET_FMT_lx, env->PC);
    cpu_fprintf(f, " PSW: " TARGET_FMT_lx, psw);
    cpu_fprintf(f, " ICR: " TARGET_FMT_lx, env->ICR);
    cpu_fprintf(f, "\nPCXI: " TARGET_FMT_lx, env->PCXI);
    cpu_fprintf(f, " FCX: " TARGET_FMT_lx, env->FCX);
    cpu_fprintf(f, " LCX: " TARGET_FMT_lx, env->LCX);

    for (i = 0; i < 16; ++i) {
        cpu_fprintf(f, "\nGPR A%02d:", i);
        cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_a[i]);
    }
    for (i = 0; i < 16; ++i) {
        cpu_fprintf(f, "\nGPR D%02d:", i);
        cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_d[i]);
    }
    cpu_fprintf(f, "\n");
}
/*
 * Functions to generate micro-ops
 */

/* Macros for generating helpers */

#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
} while (0)
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
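/*
 * Note on the GEN_HELPER_{LL,LU,UL,UU} macros above: they differ only in
 * which sign-extended 16-bit halves of arg0/arg1 are handed to the generated
 * helper.  tcg_gen_sari_tl(x, arg, 16) selects the upper halfword,
 * tcg_gen_ext16s_tl(x, arg) the lower one, so the suffix names the halfword
 * pairing used for the packed multiply (presumably matching the LL/LU/UL/UU
 * halfword selectors of the MUL.H/MADD.H-style instructions that the callers
 * below translate).
 */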
#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {    \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
    TCGv_i64 arg1 = tcg_temp_new_i64();                      \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                  \
    gen_helper_##name(ret, arg1, arg2);                      \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
    tcg_temp_free_i64(ret);                                  \
    tcg_temp_free_i64(arg1);                                 \
} while (0)
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
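/*
 * Reading the macros above: EA_ABS_FORMAT moves bits [17:14] of the encoded
 * 18-bit constant up to the top nibble of the effective address
 * ((con & 0x3C000) << 14) and keeps bits [13:0] in place.  EA_B_ABSOLUT does
 * the analogous reassembly for the 24-bit branch form; note that, as written,
 * it uses the surrounding 'offset' variable rather than its 'con' parameter.
 */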
/* Functions for load/save to/from memory */

static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, con);
    tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
}
static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, con);
    tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
}
static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    tcg_gen_concat_i32_i64(temp, rl, rh);
    tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);

    tcg_temp_free_i64(temp);
}
static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, base, con);
    gen_st_2regs_64(rh, rl, temp, ctx);
}
static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
    /* write back to two 32 bit regs */
    tcg_gen_extr_i64_i32(rl, rh, temp);

    tcg_temp_free_i64(temp);
}
static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, base, con);
    gen_ld_2regs_64(rh, rl, temp, ctx);
}
static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, off);
    tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
    tcg_gen_mov_tl(r2, temp);
}
static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           TCGMemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, off);
    tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
    tcg_gen_mov_tl(r2, temp);
}
/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    /* temp = (M(EA, word) */
    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    /* temp = temp & ~E[a][63:32]) */
    tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
    /* temp2 = (E[a][31:0] & E[a][63:32]); */
    tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
    /* temp = temp | temp2; */
    tcg_gen_or_tl(temp, temp, temp2);
    /* M(EA, word) = temp; */
    tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);

    tcg_temp_free(temp2);
}
/* tmp = M(EA, word); M(EA, word) = D[a]; D[a] = tmp; */
static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
}
/* We generate loads and stores to the core special function registers (csfr)
   through the functions gen_mfcr and gen_mtcr. To handle access permissions,
   we use the three macros R, A and E, which allow read-only, all and endinit
   protected access. These macros also specify in which ISA version the csfr
   was introduced. */
#define R(ADDRESS, REG, FEATURE)                                         \
        if (tricore_feature(env, FEATURE)) {                             \
            tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
        }
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
static inline void gen_mfcr(CPUTriCoreState *env, TCGv ret, int32_t offset)
{
    /* since we're caching PSW make this a special case */
    if (offset == 0xfe04) {
        gen_helper_psw_read(ret, cpu_env);
    }
}
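/*
 * PSW is the one csfr that is not read straight out of CPUTriCoreState here:
 * its flag bits are cached in the TCG globals cpu_PSW_C/V/SV/AV/SAV declared
 * at the top of this file, so csfr address 0xfe04 is special-cased through
 * gen_helper_psw_read (and gen_helper_psw_write in gen_mtcr below) to keep
 * the architectural register view and the cached flags consistent.
 */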
#define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
                                    since no exception occurs */
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)                \
        if (tricore_feature(env, FEATURE)) {                             \
            tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG));  \
        }
/* Endinit protected registers
   TODO: Since the endinit bit is in a register of a not yet implemented
   watchdog device, we handle endinit protected registers like
   all-access registers for now. */
#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
static inline void gen_mtcr(CPUTriCoreState *env, DisasContext *ctx, TCGv r1,
                            int32_t offset)
{
    if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
        /* since we're caching PSW make this a special case */
        if (offset == 0xfe04) {
            gen_helper_psw_write(cpu_env, r1);
        }
    } else {
        /* generate privilege trap */
    }
}
/* Functions for arithmetic instructions  */

static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    /* Addition and set V/SV bits */
    tcg_gen_add_tl(result, r1, r2);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
}
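/*
 * The PSW bookkeeping above is the pattern used by nearly every generator in
 * this file: signed overflow is detected as (result ^ r1) & ~(r1 ^ r2), whose
 * bit 31 becomes V; SV is made sticky by OR-ing V into it; AV is
 * result[31] ^ result[30], computed here as (result + result) ^ result; and
 * SAV accumulates AV the same way SV accumulates V.
 */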
static inline void
gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();

    tcg_gen_add_i64(result, r1, r2);
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_andc_i64(t1, t1, t0);
    tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t1, 32);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits */
    tcg_gen_trunc_shr_i64_i32(temp, result, 32);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);

    tcg_temp_free_i64(result);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
static inline void
gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, void(*op1)(TCGv, TCGv, TCGv),
               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv temp4 = tcg_temp_new();

    (*op1)(temp, r1_low, r2);
    tcg_gen_xor_tl(temp2, temp, r1_low);
    tcg_gen_xor_tl(temp3, r1_low, r2);
    if (op1 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(temp2, temp2, temp3);
    } else {
        tcg_gen_and_tl(temp2, temp2, temp3);
    }
    (*op2)(temp3, r1_high, r3);
    tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high);
    tcg_gen_xor_tl(temp4, r1_high, r3);
    if (op2 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    } else {
        tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    }
    /* combine V0/V1 bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_mov_tl(ret_low, temp);
    tcg_gen_mov_tl(ret_high, temp3);
    tcg_gen_add_tl(temp, ret_low, ret_low);
    tcg_gen_xor_tl(temp, temp, ret_low);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free(temp4);
}
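/*
 * gen_addsub64_h treats a 64-bit register pair as two independent 32-bit
 * lanes: op1 combines the low words, op2 the high words, and the per-lane
 * overflow indicators (temp2 for the low lane, cpu_PSW_V for the high lane)
 * are OR-ed into a single V bit.  The andc/and choice above is what switches
 * the overflow test between addition and subtraction semantics.
 */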
/* ret = r2 + (r1 * r3); */
static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t1, t2, t1);

    tcg_gen_trunc_i64_i32(ret, t1);
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* t1 < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
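/*
 * Overflow detection in gen_madd32_d works on the widened 64-bit result:
 * V is set iff the sum falls outside [-0x80000000, 0x7fffffff], i.e. iff it
 * cannot be represented in the 32 bits that were written back to ret.  The
 * two setcondi/or steps above build exactly that range check.
 */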
static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_madd32_d(ret, r1, r2, temp);
}
static inline void
gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();

    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the add can overflow */
    tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);
}
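/*
 * Here the 32x32 multiply cannot overflow by construction
 * (tcg_gen_muls2_tl yields the exact double-width product), so only the
 * 64-bit accumulate done with tcg_gen_add2_tl is checked: V is derived from
 * the sign change of the high word, with the same formula as in gen_add_d.
 */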
static inline void
gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t2, t2, t1);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, t2);
    /* only the add overflows, if t2 < t1 */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
    tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
static inline void
gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
static inline void
gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
static inline void
gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
           TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_add_tl, tcg_gen_add_tl);

    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
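/*
 * For gen_madd_h and the other *_h generators that follow, the 'mode'
 * operand encodes which halfword pairing of r2/r3 should feed the multiply,
 * so only one of the four GEN_HELPER_{LL,LU,UL,UU} expansions applies to a
 * given instruction, while 'n' is forwarded unchanged to the mul_h helper.
 * The helper's 64-bit product pair is then split with tcg_gen_extr_i64_i32
 * and accumulated into the destination register pair via gen_addsub64_h.
 */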
static inline void
gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_sub_tl, tcg_gen_add_tl);

    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);

    gen_add64_d(temp64_2, temp64_3, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);

    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);
static inline void
gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_adds(ret_low, r1_low, temp);
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_adds(ret_high, r1_high, temp2);
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);

    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);
static inline void
gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_subs(ret_low, r1_low, temp);
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_adds(ret_high, r1_high, temp2);
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);

    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);

    gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);

    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
static inline void
gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();

    GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);

    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_add64_d(temp64_3, temp64_2, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);

    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
static inline void
gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);

    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);

    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
static inline void
gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
              uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high);

    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode);

    tcg_temp_free(temp2);
}
static inline void
gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_addsur_h(ret, cpu_env, temp64, temp, temp2);

    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
               uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);

    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_addsur_h_ssov(ret, cpu_env, temp64, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv temp = tcg_const_i32(n);
    gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, temp);
    tcg_temp_free(temp);
}
static inline void
gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv temp = tcg_const_i32(n);
    gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
    tcg_temp_free(temp);
}
static inline void
gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
             uint32_t up_shift, CPUTriCoreState *env)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    tcg_gen_shli_i64(t2, t2, n);

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_sari_i64(t2, t2, up_shift);

    tcg_gen_add_i64(t3, t1, t2);
    tcg_gen_trunc_i64_i32(temp3, t3);
    tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
    tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
    tcg_gen_and_tl(temp, temp, temp2);
    tcg_gen_shli_tl(temp, temp, 31);
    /* negate v bit, if special condition */
    tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
    tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
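/*
 * The q-format generators work on sign-extended 64-bit intermediates: the
 * product is shifted left by n and then arithmetically shifted right by
 * up_shift before being accumulated, and V is again a bounds check against
 * the signed 32-bit range.  The "negate the ovf" fixup above handles the one
 * operand pattern (arg2 == arg3 == 0x80000000 together with the extra shift)
 * whose doubled product would otherwise be misclassified by that check.
 */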
static inline void
gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_add_d(ret, arg1, temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_adds(ret, arg1, temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
               TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    gen_add64_d(t3, t1, t2);
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
                TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);

    gen_helper_add64_ssov(t1, cpu_env, t1, t2);
    tcg_gen_extr_i64_i32(rl, rh, t1);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
static inline void
gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n, CPUTriCoreState *env)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;

    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    tcg_gen_shli_i64(t2, t2, 1);
    tcg_gen_add_i64(t4, t1, t2);
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_andc_i64(t3, t3, t2);
    tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t3, 32);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
    tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
    tcg_gen_and_tl(temp, temp, temp2);
    tcg_gen_shli_tl(temp, temp, 31);
    /* negate v bit, if special condition */
    tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);

    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
static inline void
gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
              uint32_t up_shift)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    tcg_gen_sari_i64(t2, t2, up_shift - n);

    gen_helper_madd32_q_add_ssov(ret, cpu_env, t1, t2);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
static inline void
gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
              TCGv arg3, uint32_t n)
{
    TCGv_i64 r1 = tcg_temp_new_i64();
    TCGv temp = tcg_const_i32(n);

    tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
    gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
    tcg_gen_extr_i64_i32(rl, rh, r1);

    tcg_temp_free_i64(r1);
    tcg_temp_free(temp);
}
/* ret = r2 - (r1 * r3); */
static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t1, t2, t1);

    tcg_gen_trunc_i64_i32(ret, t1);
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* result < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_msub32_d(ret, r1, r2, temp);
    tcg_temp_free(temp);
}
static inline void
gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();

    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the sub can overflow */
    tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);
}
static inline void
gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}
static inline void
gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t3, t2, t1);
    tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
    /* calc V bit, only the sub can overflow, if t1 > t2 */
    tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
    tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
static inline void
gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}
static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
{
    TCGv temp = tcg_const_i32(r2);
    gen_add_d(ret, r1, temp);
    tcg_temp_free(temp);
}
/* calculate the carry bit too */
static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, 0);
    /* Addition and set C/V/SV bits */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
}
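/*
 * Carry comes out of tcg_gen_add2_i32: adding (0:r1) + (0:r2) as double-word
 * pairs leaves the carry-out of the low-word addition in the high output,
 * which is written straight into cpu_PSW_C; the V/AV bookkeeping is the same
 * as in gen_add_d.
 */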
static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_add_CC(ret, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv carry = tcg_temp_new_i32();
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, 0);
    tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
    /* Addition, carry and set C/V/SV bits */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
    tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(carry);
}
static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_addc_CC(ret, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_add_tl(result, r1, r2);
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_andc_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(result);
    tcg_temp_free(mask);
}
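/*
 * gen_cond_add implements the conditional-add pattern: r4 is compared
 * against zero with 'cond', tcg_gen_movcond_tl then picks either the freshly
 * computed value or the previous one for the destination and for PSW_V/AV,
 * and 'mask' (the condition moved to bit 31) gates what gets OR-ed into the
 * sticky SV/SAV bits, so the flags only change when the add is taken.
 * gen_cond_sub below is the same structure with the subtraction overflow
 * test.
 */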
static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
                                 TCGv r3, TCGv r4)
{
    TCGv temp = tcg_const_i32(r2);
    gen_cond_add(cond, r1, temp, r3, r4);
    tcg_temp_free(temp);
}
static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free(result);
}
static inline void
gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();

    tcg_gen_sub_i64(result, r1, r2);
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_and_i64(t1, t1, t0);
    tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t1, 32);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits */
    tcg_gen_trunc_shr_i64_i32(temp, result, 32);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free_i64(result);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv result = tcg_temp_new();
    TCGv temp = tcg_temp_new();

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(result);
    tcg_temp_free(temp);
}
static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_not_tl(temp, r2);
    gen_addc_CC(ret, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_and_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(result);
    tcg_temp_free(mask);
}
static inline void
gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
           TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_sub_tl, tcg_gen_sub_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_subs(ret_low, r1_low, temp);
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_subs(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();

    GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);

    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_sub64_d(temp64_3, temp64_2, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
static inline void
gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);

    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
static inline void
gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
              uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
               uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();

    GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
    GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);

    gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv temp = tcg_const_i32(n);
    gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp);
    tcg_temp_free(temp);
}
static inline void
gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv temp = tcg_const_i32(n);
    gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
    tcg_temp_free(temp);
}
static inline void
gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
             uint32_t up_shift, CPUTriCoreState *env)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);

    tcg_gen_ext_i32_i64(t1, arg1);
    /* if we shift part of the fraction out, we need to round up */
    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
    tcg_gen_sari_i64(t2, t2, up_shift - n);
    tcg_gen_add_i64(t2, t2, t4);

    tcg_gen_sub_i64(t3, t1, t2);
    tcg_gen_trunc_i64_i32(temp3, t3);
    tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
    tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
    tcg_gen_and_tl(temp, temp, temp2);
    tcg_gen_shli_tl(temp, temp, 31);
    /* negate v bit, if special condition */
    tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
    tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
static inline void
gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_sub_d(ret, arg1, temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_subs(ret, arg1, temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
               TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    gen_sub64_d(t3, t1, t2);
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void
gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
                TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);

    gen_helper_sub64_ssov(t1, cpu_env, t1, t2);
    tcg_gen_extr_i64_i32(rl, rh, t1);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
static inline void
gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n, CPUTriCoreState *env)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;

    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    tcg_gen_shli_i64(t2, t2, 1);
    tcg_gen_sub_i64(t4, t1, t2);
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_and_i64(t3, t3, t2);
    tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t3, 32);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1). If this is the
       case, we negate the ovf. */
    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
    tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
    tcg_gen_and_tl(temp, temp, temp2);
    tcg_gen_shli_tl(temp, temp, 31);
    /* negate v bit, if special condition */
    tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);

    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
static inline void
gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
              uint32_t up_shift)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    /* if we shift part of the fraction out, we need to round up */
    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
    tcg_gen_sari_i64(t3, t2, up_shift - n);
    tcg_gen_add_i64(t3, t3, t4);

    gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t4);
}
static inline void
gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
              TCGv arg3, uint32_t n)
{
    TCGv_i64 r1 = tcg_temp_new_i64();
    TCGv temp = tcg_const_i32(n);

    tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
    gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
    tcg_gen_extr_i64_i32(rl, rh, r1);

    tcg_temp_free_i64(r1);
    tcg_temp_free(temp);
}
static inline void
gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_add_tl, tcg_gen_sub_tl);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);

    gen_sub64_d(temp64_2, temp64_3, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
    tcg_temp_free_i64(temp64_3);
}
static inline void
gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_subadr_h(ret, cpu_env, temp64, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_adds(ret_low, r1_low, temp);
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_subs(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
    tcg_temp_free_i64(temp64);
}
static inline void
gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);

    gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);

    tcg_temp_free(temp);
    tcg_temp_free_i64(temp64);
    tcg_temp_free_i64(temp64_2);
}
static inline void
gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_const_i32(n);
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();
    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
        break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free_i64(temp64);
}
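
/* PSW bookkeeping convention used by the arithmetic helpers below: the
 * overflow condition is kept in bit 31 of cpu_PSW_V, cpu_PSW_SV and
 * cpu_PSW_SAV accumulate V/AV with a sticky OR, and the "advanced overflow"
 * bit cpu_PSW_AV is computed as result[31] ^ result[30] via the add/xor pair
 * on the result. */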
static inline void gen_abs(TCGv ret, TCGv r1)
{
    TCGv temp = tcg_temp_new();
    TCGv t0 = tcg_const_i32(0);

    tcg_gen_neg_tl(temp, r1);
    tcg_gen_movcond_tl(TCG_COND_GE, ret, r1, t0, r1, temp);
    /* overflow can only happen, if r1 = 0x80000000 */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(t0);
}
static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_sub_tl(temp, r2, r1);
    tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);

    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, result, r2);
    tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);

    tcg_temp_free(temp);
    tcg_temp_free(result);
}
static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_absdif(ret, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_absdif_ssov(ret, cpu_env, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv high = tcg_temp_new();
    TCGv low = tcg_temp_new();

    tcg_gen_muls2_tl(low, high, r1, r2);
    tcg_gen_mov_tl(ret, low);
    tcg_gen_sari_tl(low, low, 31);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(high);
    tcg_temp_free(low);
}

static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_mul_i32s(ret, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}

static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
                                 int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_mul_i64s(ret_low, ret_high, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}

static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
                                 int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_mul_i64u(ret_low, ret_high, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_mul_ssov(ret, cpu_env, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_mul_suov(ret, cpu_env, r1, temp);
    tcg_temp_free(temp);
}
/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp);
    tcg_temp_free(temp);
}

static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp);
    tcg_temp_free(temp);
}
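
/* For the Q-format multiply helpers below, n selects whether the raw product
 * is doubled (the fractional re-normalisation step) and up_shift selects how
 * much of the 64-bit product is shifted down into the result. */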
static inline void
gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 temp_64 = tcg_temp_new_i64();
    TCGv_i64 temp2_64 = tcg_temp_new_i64();

    if (n == 0) {
        if (up_shift == 32) {
            tcg_gen_muls2_tl(rh, rl, arg1, arg2);
        } else if (up_shift == 16) {
            tcg_gen_ext_i32_i64(temp_64, arg1);
            tcg_gen_ext_i32_i64(temp2_64, arg2);

            tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
            tcg_gen_shri_i64(temp_64, temp_64, up_shift);
            tcg_gen_extr_i64_i32(rl, rh, temp_64);
        } else {
            tcg_gen_muls2_tl(rl, rh, arg1, arg2);
        }
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else { /* n is expected to be 1 */
        tcg_gen_ext_i32_i64(temp_64, arg1);
        tcg_gen_ext_i32_i64(temp2_64, arg2);

        tcg_gen_mul_i64(temp_64, temp_64, temp2_64);

        if (up_shift == 0) {
            tcg_gen_shli_i64(temp_64, temp_64, 1);
        } else {
            tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1);
        }
        tcg_gen_extr_i64_i32(rl, rh, temp_64);
        /* overflow only occurs if r1 = r2 = 0x8000 */
        if (up_shift == 0) {/* result is 64 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
                                0x80000000);
        } else { /* result is 32 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
                                0x80000000);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc sv overflow bit */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    }
    /* calc av overflow bit */
    if (up_shift == 0) {
        tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
        tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    } else {
        tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
        tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
    }
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_temp_free(temp);
    tcg_temp_free_i64(temp_64);
    tcg_temp_free_i64(temp2_64);
}
static void
gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
}
static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_addi_tl(ret, ret, 0x8000);
    } else {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        tcg_gen_addi_tl(ret, ret, 0x8000);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
        tcg_gen_muli_tl(temp, temp, 0x8001);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* cut halfword off */
    tcg_gen_andi_tl(ret, ret, 0xffff0000);

    tcg_temp_free(temp);
}
static inline void
gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}

static inline void
gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}

static inline void
gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}

static inline void
gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}

static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
    tcg_temp_free(temp);
}

static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
    tcg_temp_free(temp);
}

static inline void
gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}

static inline void
gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}

static inline void
gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
    tcg_temp_free_i64(temp64);
}

static inline void
gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
    tcg_temp_free(temp);
}
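
/* gen_saturate clamps arg into [low, up] with two movcond operations: the
 * first picks max(arg, low), the second picks the minimum of that and up. */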
static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
{
    TCGv sat_neg = tcg_const_i32(low);
    TCGv temp = tcg_const_i32(up);

    /* sat_neg = (arg < low ) ? low : arg; */
    tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);

    /* ret = (sat_neg > up ) ? up : sat_neg; */
    tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);

    tcg_temp_free(sat_neg);
    tcg_temp_free(temp);
}

static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
{
    TCGv temp = tcg_const_i32(up);
    /* sat_neg = (arg > up ) ? up : arg; */
    tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
    tcg_temp_free(temp);
}
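
/* Shift helpers: a positive shift_count shifts left, a negative one shifts
 * right by -shift_count; shift_count == -32 is special-cased because TCG
 * shift counts must stay below the operand width. */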
static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
{
    if (shift_count == -32) {
        tcg_gen_movi_tl(ret, 0);
    } else if (shift_count >= 0) {
        tcg_gen_shli_tl(ret, r1, shift_count);
    } else {
        tcg_gen_shri_tl(ret, r1, -shift_count);
    }
}

static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
{
    TCGv temp_low, temp_high;

    if (shiftcount == -16) {
        tcg_gen_movi_tl(ret, 0);
    } else {
        temp_high = tcg_temp_new();
        temp_low = tcg_temp_new();

        tcg_gen_andi_tl(temp_low, r1, 0xffff);
        tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
        gen_shi(temp_low, temp_low, shiftcount);
        gen_shi(ret, temp_high, shiftcount);
        tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);

        tcg_temp_free(temp_low);
        tcg_temp_free(temp_high);
    }
}
static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
{
    uint32_t msk, msk_start;
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv t_0 = tcg_const_i32(0);

    if (shift_count == 0) {
        /* Clear PSW.C and PSW.V */
        tcg_gen_movi_tl(cpu_PSW_C, 0);
        tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count == -32) {
        tcg_gen_mov_tl(cpu_PSW_C, r1);
        /* fill ret completely with sign bit */
        tcg_gen_sari_tl(ret, r1, 31);
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else if (shift_count > 0) {
        TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
        TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);

        msk_start = 32 - shift_count;
        msk = ((1 << shift_count) - 1) << msk_start;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        /* calc v/sv bits */
        tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
        tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
        tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
        tcg_gen_shli_tl(ret, r1, shift_count);

        tcg_temp_free(t_max);
        tcg_temp_free(t_min);
    } else {
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        msk = (1 << -shift_count) - 1;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        tcg_gen_sari_tl(ret, r1, -shift_count);
    }
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(t_0);
}
static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sha_ssov(ret, cpu_env, r1, r2);
}

static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_shas(ret, r1, temp);
    tcg_temp_free(temp);
}
static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
{
    TCGv low, high;

    if (shift_count == 0) {
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count > 0) {
        low = tcg_temp_new();
        high = tcg_temp_new();

        tcg_gen_andi_tl(high, r1, 0xffff0000);
        tcg_gen_shli_tl(low, r1, shift_count);
        tcg_gen_shli_tl(ret, high, shift_count);
        tcg_gen_deposit_tl(ret, ret, low, 0, 16);

        tcg_temp_free(low);
        tcg_temp_free(high);
    } else {
        low = tcg_temp_new();
        high = tcg_temp_new();

        tcg_gen_ext16s_tl(low, r1);
        tcg_gen_sari_tl(low, low, -shift_count);
        tcg_gen_sari_tl(ret, r1, -shift_count);
        tcg_gen_deposit_tl(ret, ret, low, 0, 16);

        tcg_temp_free(low);
        tcg_temp_free(high);
    }
}
/* ret = {ret[30:0], (r1 cond r2)}; */
static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_shli_tl(temp, ret, 1);
    tcg_gen_setcond_tl(cond, temp2, r1, r2);
    tcg_gen_or_tl(ret, temp, temp2);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}

static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_sh_cond(cond, ret, r1, temp);
    tcg_temp_free(temp);
}
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_add_ssov(ret, cpu_env, r1, r2);
}

static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_add_ssov(ret, cpu_env, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_const_i32(con);
    gen_helper_add_suov(ret, cpu_env, r1, temp);
    tcg_temp_free(temp);
}

static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_ssov(ret, cpu_env, r1, r2);
}

static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_suov(ret, cpu_env, r1, r2);
}
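
/* gen_bit_2op computes ret[0] = ret[0] op2 (r1[pos1] op1 r2[pos2]) and leaves
 * ret[31:1] unchanged; it backs the accumulating single-bit logic
 * instructions. */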
static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv),
                               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(temp1, temp1, temp2);
    (*op2)(temp1, ret, temp1);

    tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);

    tcg_temp_free(temp1);
    tcg_temp_free(temp2);
}

/* ret = r1[pos1] op1 r2[pos2]; */
static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(ret, temp1, temp2);

    tcg_gen_andi_tl(ret, ret, 0x1);

    tcg_temp_free(temp1);
    tcg_temp_free(temp2);
}
static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
                                         void(*op)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    /* temp = (arg1 cond arg2 )*/
    tcg_gen_setcond_tl(cond, temp, r1, r2);
    tcg_gen_andi_tl(temp2, ret, 0x1);
    /* temp = temp insn temp2 */
    (*op)(temp, temp, temp2);
    /* ret = {ret[31:1], temp} */
    tcg_gen_deposit_tl(ret, ret, temp, 0, 1);

    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}

static inline void
gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
                       void(*op)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_const_i32(con);
    gen_accumulating_cond(cond, ret, r1, temp, op);
    tcg_temp_free(temp);
}
/* ret = (r1 cond r2) ? 0xFFFFFFFF : 0x00000000; */
static inline void gen_cond_w(TCGCond cond, TCGv ret, TCGv r1, TCGv r2)
{
    tcg_gen_setcond_tl(cond, ret, r1, r2);
    tcg_gen_neg_tl(ret, ret);
}
static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv b0 = tcg_temp_new();
    TCGv b1 = tcg_temp_new();
    TCGv b2 = tcg_temp_new();
    TCGv b3 = tcg_temp_new();

    tcg_gen_andi_tl(b0, r1, 0xff);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);

    tcg_gen_andi_tl(b1, r1, 0xff00);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);

    tcg_gen_andi_tl(b2, r1, 0xff0000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);

    tcg_gen_andi_tl(b3, r1, 0xff000000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);

    tcg_gen_or_tl(ret, b0, b1);
    tcg_gen_or_tl(ret, ret, b2);
    tcg_gen_or_tl(ret, ret, b3);

    tcg_temp_free(b0);
    tcg_temp_free(b1);
    tcg_temp_free(b2);
    tcg_temp_free(b3);
}

static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();

    tcg_gen_andi_tl(h0, r1, 0xffff);
    tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);

    tcg_gen_andi_tl(h1, r1, 0xffff0000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);

    tcg_gen_or_tl(ret, h0, h1);

    tcg_temp_free(h0);
    tcg_temp_free(h1);
}
/* mask = ((1 << width) -1) << pos;
   ret = (r1 & ~mask) | (r2 << pos) & mask); */
static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
{
    TCGv mask = tcg_temp_new();
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_movi_tl(mask, 1);
    tcg_gen_shl_tl(mask, mask, width);
    tcg_gen_subi_tl(mask, mask, 1);
    tcg_gen_shl_tl(mask, mask, pos);

    tcg_gen_shl_tl(temp, r2, pos);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_andc_tl(temp2, r1, mask);
    tcg_gen_or_tl(ret, temp, temp2);

    tcg_temp_free(mask);
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    gen_helper_bsplit(temp, r1);
    tcg_gen_extr_i64_i32(rl, rh, temp);

    tcg_temp_free_i64(temp);
}

static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    gen_helper_unpack(temp, r1);
    tcg_gen_extr_i64_i32(rl, rh, temp);

    tcg_temp_free_i64(temp);
}
static inline void
gen_dvinit_b(CPUTriCoreState *env, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
{
    TCGv_i64 ret = tcg_temp_new_i64();

    if (!tricore_feature(env, TRICORE_FEATURE_131)) {
        gen_helper_dvinit_b_13(ret, cpu_env, r1, r2);
    } else {
        gen_helper_dvinit_b_131(ret, cpu_env, r1, r2);
    }
    tcg_gen_extr_i64_i32(rl, rh, ret);

    tcg_temp_free_i64(ret);
}

static inline void
gen_dvinit_h(CPUTriCoreState *env, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
{
    TCGv_i64 ret = tcg_temp_new_i64();

    if (!tricore_feature(env, TRICORE_FEATURE_131)) {
        gen_helper_dvinit_h_13(ret, cpu_env, r1, r2);
    } else {
        gen_helper_dvinit_h_131(ret, cpu_env, r1, r2);
    }
    tcg_gen_extr_i64_i32(rl, rh, ret);

    tcg_temp_free_i64(ret);
}
static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_add_tl(temp, arg_low, arg_low);
    tcg_gen_xor_tl(temp, temp, arg_low);
    tcg_gen_add_tl(cpu_PSW_AV, arg_high, arg_high);
    tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, arg_high);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    tcg_temp_free(temp);
}

static void gen_calc_usb_mulr_h(TCGv arg)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_add_tl(temp, arg, arg);
    tcg_gen_xor_tl(temp, temp, arg);
    tcg_gen_shli_tl(cpu_PSW_AV, temp, 16);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_gen_movi_tl(cpu_PSW_V, 0);
    tcg_temp_free(temp);
}
/* helpers for generating program flow micro-ops */

static inline void gen_save_pc(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_PC, pc);
}

static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = ctx->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
        likely(!ctx->singlestep_enabled)) {
        tcg_gen_goto_tb(n);
        gen_save_pc(dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        gen_save_pc(dest);
        if (ctx->singlestep_enabled) {
            /* raise exception debug */
        }
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
                                   TCGv r2, int16_t address)
{
    TCGLabel *jumpLabel = gen_new_label();
    tcg_gen_brcond_tl(cond, r1, r2, jumpLabel);

    gen_goto_tb(ctx, 1, ctx->next_pc);

    gen_set_label(jumpLabel);
    gen_goto_tb(ctx, 0, ctx->pc + address * 2);
}

static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
                                    int r2, int16_t address)
{
    TCGv temp = tcg_const_i32(r2);
    gen_branch_cond(ctx, cond, r1, temp, address);
    tcg_temp_free(temp);
}

static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
    gen_goto_tb(ctx, 1, ctx->pc + offset);
    gen_set_label(l1);
    gen_goto_tb(ctx, 0, ctx->next_pc);
}
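
/* The branch helpers above either chain two translation blocks directly
 * (gen_goto_tb, when the destination stays on the same guest page and
 * single-stepping is off) or fall back to storing the new PC and exiting to
 * the main loop; gen_branch_cond/gen_branch_condi emit the taken and
 * not-taken paths around a TCG label. */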
3245 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
3246 int r2
, int32_t constant
, int32_t offset
)
3252 /* SB-format jumps */
3255 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3257 case OPC1_32_B_CALL
:
3258 case OPC1_16_SB_CALL
:
3259 gen_helper_1arg(call
, ctx
->next_pc
);
3260 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3263 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
3265 case OPC1_16_SB_JNZ
:
3266 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
3268 /* SBC-format jumps */
3269 case OPC1_16_SBC_JEQ
:
3270 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
3272 case OPC1_16_SBC_JNE
:
3273 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
3275 /* SBRN-format jumps */
3276 case OPC1_16_SBRN_JZ_T
:
3277 temp
= tcg_temp_new();
3278 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
3279 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3280 tcg_temp_free(temp
);
3282 case OPC1_16_SBRN_JNZ_T
:
3283 temp
= tcg_temp_new();
3284 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
3285 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3286 tcg_temp_free(temp
);
3288 /* SBR-format jumps */
3289 case OPC1_16_SBR_JEQ
:
3290 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
3293 case OPC1_16_SBR_JNE
:
3294 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
3297 case OPC1_16_SBR_JNZ
:
3298 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
3300 case OPC1_16_SBR_JNZ_A
:
3301 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3303 case OPC1_16_SBR_JGEZ
:
3304 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
3306 case OPC1_16_SBR_JGTZ
:
3307 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
3309 case OPC1_16_SBR_JLEZ
:
3310 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
3312 case OPC1_16_SBR_JLTZ
:
3313 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
3315 case OPC1_16_SBR_JZ
:
3316 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
3318 case OPC1_16_SBR_JZ_A
:
3319 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3321 case OPC1_16_SBR_LOOP
:
3322 gen_loop(ctx
, r1
, offset
* 2 - 32);
3324 /* SR-format jumps */
3326 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
3329 case OPC2_32_SYS_RET
:
3330 case OPC2_16_SR_RET
:
3331 gen_helper_ret(cpu_env
);
3335 case OPC1_32_B_CALLA
:
3336 gen_helper_1arg(call
, ctx
->next_pc
);
3337 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3340 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3343 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3346 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3347 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3350 case OPCM_32_BRC_EQ_NEQ
:
3351 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3352 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3354 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3357 case OPCM_32_BRC_GE
:
3358 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3359 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3361 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3362 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3366 case OPCM_32_BRC_JLT
:
3367 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3368 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3370 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3371 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3375 case OPCM_32_BRC_JNE
:
3376 temp
= tcg_temp_new();
3377 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3378 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3379 /* subi is unconditional */
3380 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3381 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3383 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3384 /* addi is unconditional */
3385 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3386 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3388 tcg_temp_free(temp
);
3391 case OPCM_32_BRN_JTT
:
3392 n
= MASK_OP_BRN_N(ctx
->opcode
);
3394 temp
= tcg_temp_new();
3395 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3397 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3398 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3400 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3402 tcg_temp_free(temp
);
3405 case OPCM_32_BRR_EQ_NEQ
:
3406 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3407 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3410 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3414 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3415 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3416 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3419 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3423 case OPCM_32_BRR_GE
:
3424 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3425 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3428 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3432 case OPCM_32_BRR_JLT
:
3433 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3434 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3437 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3441 case OPCM_32_BRR_LOOP
:
3442 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3443 gen_loop(ctx
, r1
, offset
* 2);
3445 /* OPC2_32_BRR_LOOPU */
3446 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3449 case OPCM_32_BRR_JNE
:
3450 temp
= tcg_temp_new();
3451 temp2
= tcg_temp_new();
3452 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3453 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3454 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3455 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3456 /* subi is unconditional */
3457 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3458 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3460 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3461 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3462 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3463 /* addi is unconditional */
3464 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3465 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3467 tcg_temp_free(temp
);
3468 tcg_temp_free(temp2
);
3470 case OPCM_32_BRR_JNZ
:
3471 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3472 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3474 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3478 printf("Branch Error at %x\n", ctx
->pc
);
3480 ctx
->bstate
= BS_BRANCH
;
3485 * Functions for decoding instructions
3488 static void decode_src_opc(DisasContext
*ctx
, int op1
)
3494 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3495 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3498 case OPC1_16_SRC_ADD
:
3499 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3501 case OPC1_16_SRC_ADD_A15
:
3502 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3504 case OPC1_16_SRC_ADD_15A
:
3505 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3507 case OPC1_16_SRC_ADD_A
:
3508 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3510 case OPC1_16_SRC_CADD
:
3511 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3514 case OPC1_16_SRC_CADDN
:
3515 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3518 case OPC1_16_SRC_CMOV
:
3519 temp
= tcg_const_tl(0);
3520 temp2
= tcg_const_tl(const4
);
3521 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3522 temp2
, cpu_gpr_d
[r1
]);
3523 tcg_temp_free(temp
);
3524 tcg_temp_free(temp2
);
3526 case OPC1_16_SRC_CMOVN
:
3527 temp
= tcg_const_tl(0);
3528 temp2
= tcg_const_tl(const4
);
3529 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3530 temp2
, cpu_gpr_d
[r1
]);
3531 tcg_temp_free(temp
);
3532 tcg_temp_free(temp2
);
3534 case OPC1_16_SRC_EQ
:
3535 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3538 case OPC1_16_SRC_LT
:
3539 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3542 case OPC1_16_SRC_MOV
:
3543 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3545 case OPC1_16_SRC_MOV_A
:
3546 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3547 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3549 case OPC1_16_SRC_SH
:
3550 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3552 case OPC1_16_SRC_SHA
:
3553 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3558 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3563 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3564 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3567 case OPC1_16_SRR_ADD
:
3568 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3570 case OPC1_16_SRR_ADD_A15
:
3571 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3573 case OPC1_16_SRR_ADD_15A
:
3574 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3576 case OPC1_16_SRR_ADD_A
:
3577 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3579 case OPC1_16_SRR_ADDS
:
3580 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3582 case OPC1_16_SRR_AND
:
3583 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3585 case OPC1_16_SRR_CMOV
:
3586 temp
= tcg_const_tl(0);
3587 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3588 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3589 tcg_temp_free(temp
);
3591 case OPC1_16_SRR_CMOVN
:
3592 temp
= tcg_const_tl(0);
3593 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3594 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3595 tcg_temp_free(temp
);
3597 case OPC1_16_SRR_EQ
:
3598 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3601 case OPC1_16_SRR_LT
:
3602 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3605 case OPC1_16_SRR_MOV
:
3606 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3608 case OPC1_16_SRR_MOV_A
:
3609 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3611 case OPC1_16_SRR_MOV_AA
:
3612 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3614 case OPC1_16_SRR_MOV_D
:
3615 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3617 case OPC1_16_SRR_MUL
:
3618 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3620 case OPC1_16_SRR_OR
:
3621 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3623 case OPC1_16_SRR_SUB
:
3624 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3626 case OPC1_16_SRR_SUB_A15B
:
3627 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3629 case OPC1_16_SRR_SUB_15AB
:
3630 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3632 case OPC1_16_SRR_SUBS
:
3633 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3635 case OPC1_16_SRR_XOR
:
3636 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3641 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3645 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3646 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3649 case OPC1_16_SSR_ST_A
:
3650 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3652 case OPC1_16_SSR_ST_A_POSTINC
:
3653 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3654 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3656 case OPC1_16_SSR_ST_B
:
3657 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3659 case OPC1_16_SSR_ST_B_POSTINC
:
3660 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3661 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3663 case OPC1_16_SSR_ST_H
:
3664 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3666 case OPC1_16_SSR_ST_H_POSTINC
:
3667 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3668 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3670 case OPC1_16_SSR_ST_W
:
3671 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3673 case OPC1_16_SSR_ST_W_POSTINC
:
3674 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3675 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3680 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3684 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3687 case OPC1_16_SC_AND
:
3688 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3690 case OPC1_16_SC_BISR
:
3691 gen_helper_1arg(bisr
, const16
& 0xff);
3693 case OPC1_16_SC_LD_A
:
3694 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3696 case OPC1_16_SC_LD_W
:
3697 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3699 case OPC1_16_SC_MOV
:
3700 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3703 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3705 case OPC1_16_SC_ST_A
:
3706 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3708 case OPC1_16_SC_ST_W
:
3709 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3711 case OPC1_16_SC_SUB_A
:
3712 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3717 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3721 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3722 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3726 case OPC1_16_SLR_LD_A
:
3727 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3729 case OPC1_16_SLR_LD_A_POSTINC
:
3730 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3731 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3733 case OPC1_16_SLR_LD_BU
:
3734 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3736 case OPC1_16_SLR_LD_BU_POSTINC
:
3737 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3738 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3740 case OPC1_16_SLR_LD_H
:
3741 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3743 case OPC1_16_SLR_LD_H_POSTINC
:
3744 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3745 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3747 case OPC1_16_SLR_LD_W
:
3748 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3750 case OPC1_16_SLR_LD_W_POSTINC
:
3751 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3752 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3757 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3762 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3763 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3767 case OPC1_16_SRO_LD_A
:
3768 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3770 case OPC1_16_SRO_LD_BU
:
3771 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3773 case OPC1_16_SRO_LD_H
:
3774 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
3776 case OPC1_16_SRO_LD_W
:
3777 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3779 case OPC1_16_SRO_ST_A
:
3780 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3782 case OPC1_16_SRO_ST_B
:
3783 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3785 case OPC1_16_SRO_ST_H
:
3786 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3788 case OPC1_16_SRO_ST_W
:
3789 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3794 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
3797 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3800 case OPC2_16_SR_NOP
:
3802 case OPC2_16_SR_RET
:
3803 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3805 case OPC2_16_SR_RFE
:
3806 gen_helper_rfe(cpu_env
);
3808 ctx
->bstate
= BS_BRANCH
;
3810 case OPC2_16_SR_DEBUG
:
3811 /* raise EXCP_DEBUG */
3816 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
3822 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3823 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3826 case OPC2_16_SR_RSUB
:
3827 /* overflow only if r1 = -0x80000000 */
3828 temp
= tcg_const_i32(-0x80000000);
3830 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
3831 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3833 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3835 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3837 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3838 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3840 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3841 tcg_temp_free(temp
);
3843 case OPC2_16_SR_SAT_B
:
3844 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3846 case OPC2_16_SR_SAT_BU
:
3847 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3849 case OPC2_16_SR_SAT_H
:
3850 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3852 case OPC2_16_SR_SAT_HU
:
3853 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3858 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
3866 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3868 /* handle ADDSC.A opcode only being 6 bit long */
3869 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3870 op1
= OPC1_16_SRRS_ADDSC_A
;
3874 case OPC1_16_SRC_ADD
:
3875 case OPC1_16_SRC_ADD_A15
:
3876 case OPC1_16_SRC_ADD_15A
:
3877 case OPC1_16_SRC_ADD_A
:
3878 case OPC1_16_SRC_CADD
:
3879 case OPC1_16_SRC_CADDN
:
3880 case OPC1_16_SRC_CMOV
:
3881 case OPC1_16_SRC_CMOVN
:
3882 case OPC1_16_SRC_EQ
:
3883 case OPC1_16_SRC_LT
:
3884 case OPC1_16_SRC_MOV
:
3885 case OPC1_16_SRC_MOV_A
:
3886 case OPC1_16_SRC_SH
:
3887 case OPC1_16_SRC_SHA
:
3888 decode_src_opc(ctx
, op1
);
3891 case OPC1_16_SRR_ADD
:
3892 case OPC1_16_SRR_ADD_A15
:
3893 case OPC1_16_SRR_ADD_15A
:
3894 case OPC1_16_SRR_ADD_A
:
3895 case OPC1_16_SRR_ADDS
:
3896 case OPC1_16_SRR_AND
:
3897 case OPC1_16_SRR_CMOV
:
3898 case OPC1_16_SRR_CMOVN
:
3899 case OPC1_16_SRR_EQ
:
3900 case OPC1_16_SRR_LT
:
3901 case OPC1_16_SRR_MOV
:
3902 case OPC1_16_SRR_MOV_A
:
3903 case OPC1_16_SRR_MOV_AA
:
3904 case OPC1_16_SRR_MOV_D
:
3905 case OPC1_16_SRR_MUL
:
3906 case OPC1_16_SRR_OR
:
3907 case OPC1_16_SRR_SUB
:
3908 case OPC1_16_SRR_SUB_A15B
:
3909 case OPC1_16_SRR_SUB_15AB
:
3910 case OPC1_16_SRR_SUBS
:
3911 case OPC1_16_SRR_XOR
:
3912 decode_srr_opc(ctx
, op1
);
3915 case OPC1_16_SSR_ST_A
:
3916 case OPC1_16_SSR_ST_A_POSTINC
:
3917 case OPC1_16_SSR_ST_B
:
3918 case OPC1_16_SSR_ST_B_POSTINC
:
3919 case OPC1_16_SSR_ST_H
:
3920 case OPC1_16_SSR_ST_H_POSTINC
:
3921 case OPC1_16_SSR_ST_W
:
3922 case OPC1_16_SSR_ST_W_POSTINC
:
3923 decode_ssr_opc(ctx
, op1
);
3926 case OPC1_16_SRRS_ADDSC_A
:
3927 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
3928 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
3929 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
3930 temp
= tcg_temp_new();
3931 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
3932 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
3933 tcg_temp_free(temp
);
3936 case OPC1_16_SLRO_LD_A
:
3937 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3938 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3939 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3941 case OPC1_16_SLRO_LD_BU
:
3942 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3943 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3944 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3946 case OPC1_16_SLRO_LD_H
:
3947 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3948 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3949 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3951 case OPC1_16_SLRO_LD_W
:
3952 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3953 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3954 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3957 case OPC1_16_SB_CALL
:
3959 case OPC1_16_SB_JNZ
:
3961 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
3962 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
3965 case OPC1_16_SBC_JEQ
:
3966 case OPC1_16_SBC_JNE
:
3967 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3968 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3969 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3972 case OPC1_16_SBRN_JNZ_T
:
3973 case OPC1_16_SBRN_JZ_T
:
3974 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
3975 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
3976 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3979 case OPC1_16_SBR_JEQ
:
3980 case OPC1_16_SBR_JGEZ
:
3981 case OPC1_16_SBR_JGTZ
:
3982 case OPC1_16_SBR_JLEZ
:
3983 case OPC1_16_SBR_JLTZ
:
3984 case OPC1_16_SBR_JNE
:
3985 case OPC1_16_SBR_JNZ
:
3986 case OPC1_16_SBR_JNZ_A
:
3987 case OPC1_16_SBR_JZ
:
3988 case OPC1_16_SBR_JZ_A
:
3989 case OPC1_16_SBR_LOOP
:
3990 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3991 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3992 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3995 case OPC1_16_SC_AND
:
3996 case OPC1_16_SC_BISR
:
3997 case OPC1_16_SC_LD_A
:
3998 case OPC1_16_SC_LD_W
:
3999 case OPC1_16_SC_MOV
:
4001 case OPC1_16_SC_ST_A
:
4002 case OPC1_16_SC_ST_W
:
4003 case OPC1_16_SC_SUB_A
:
4004 decode_sc_opc(ctx
, op1
);
4007 case OPC1_16_SLR_LD_A
:
4008 case OPC1_16_SLR_LD_A_POSTINC
:
4009 case OPC1_16_SLR_LD_BU
:
4010 case OPC1_16_SLR_LD_BU_POSTINC
:
4011 case OPC1_16_SLR_LD_H
:
4012 case OPC1_16_SLR_LD_H_POSTINC
:
4013 case OPC1_16_SLR_LD_W
:
4014 case OPC1_16_SLR_LD_W_POSTINC
:
4015 decode_slr_opc(ctx
, op1
);
4018 case OPC1_16_SRO_LD_A
:
4019 case OPC1_16_SRO_LD_BU
:
4020 case OPC1_16_SRO_LD_H
:
4021 case OPC1_16_SRO_LD_W
:
4022 case OPC1_16_SRO_ST_A
:
4023 case OPC1_16_SRO_ST_B
:
4024 case OPC1_16_SRO_ST_H
:
4025 case OPC1_16_SRO_ST_W
:
4026 decode_sro_opc(ctx
, op1
);
4029 case OPC1_16_SSRO_ST_A
:
4030 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4031 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4032 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4034 case OPC1_16_SSRO_ST_B
:
4035 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4036 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4037 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
4039 case OPC1_16_SSRO_ST_H
:
4040 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4041 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4042 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
4044 case OPC1_16_SSRO_ST_W
:
4045 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4046 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4047 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4050 case OPCM_16_SR_SYSTEM
:
4051 decode_sr_system(env
, ctx
);
4053 case OPCM_16_SR_ACCU
:
4054 decode_sr_accu(env
, ctx
);
4057 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
4058 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
4060 case OPC1_16_SR_NOT
:
4061 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
4062 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
4068 * 32 bit instructions
4072 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
4079 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4080 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4081 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4083 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4086 case OPC2_32_ABS_LD_A
:
4087 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4089 case OPC2_32_ABS_LD_D
:
4090 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4092 case OPC2_32_ABS_LD_DA
:
4093 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4095 case OPC2_32_ABS_LD_W
:
4096 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4100 tcg_temp_free(temp
);
4103 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
4110 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4111 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4112 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4114 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4117 case OPC2_32_ABS_LD_B
:
4118 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
4120 case OPC2_32_ABS_LD_BU
:
4121 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
4123 case OPC2_32_ABS_LD_H
:
4124 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
4126 case OPC2_32_ABS_LD_HU
:
4127 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
4131 tcg_temp_free(temp
);
4134 static void decode_abs_ldst_swap(CPUTriCoreState
*env
, DisasContext
*ctx
)
4141 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4142 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4143 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4145 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4148 case OPC2_32_ABS_LDMST
:
4149 gen_ldmst(ctx
, r1
, temp
);
4151 case OPC2_32_ABS_SWAP_W
:
4152 gen_swap(ctx
, r1
, temp
);
4156 tcg_temp_free(temp
);
4159 static void decode_abs_ldst_context(CPUTriCoreState
*env
, DisasContext
*ctx
)
4164 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4165 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4168 case OPC2_32_ABS_LDLCX
:
4169 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
4171 case OPC2_32_ABS_LDUCX
:
4172 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
4174 case OPC2_32_ABS_STLCX
:
4175 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
4177 case OPC2_32_ABS_STUCX
:
4178 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
4183 static void decode_abs_store(CPUTriCoreState
*env
, DisasContext
*ctx
)
4190 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4191 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4192 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4194 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4197 case OPC2_32_ABS_ST_A
:
4198 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4200 case OPC2_32_ABS_ST_D
:
4201 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4203 case OPC2_32_ABS_ST_DA
:
4204 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4206 case OPC2_32_ABS_ST_W
:
4207 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4211 tcg_temp_free(temp
);
4214 static void decode_abs_storeb_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
4221 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4222 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4223 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4225 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4228 case OPC2_32_ABS_ST_B
:
4229 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
4231 case OPC2_32_ABS_ST_H
:
4232 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
4235 tcg_temp_free(temp
);
static void decode_bit_andacc(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;

    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
    op2 = MASK_OP_BIT_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BIT_AND_AND_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl, &tcg_gen_and_tl);
        break;
    case OPC2_32_BIT_AND_ANDN_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
        break;
    case OPC2_32_BIT_AND_NOR_T:
        if (TCG_TARGET_HAS_andc_i32) {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
        } else {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_and_tl);
        }
        break;
    case OPC2_32_BIT_AND_OR_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl, &tcg_gen_and_tl);
        break;
    }
}

static void decode_bit_logical_t(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;

    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
    op2 = MASK_OP_BIT_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BIT_AND_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl);
        break;
    case OPC2_32_BIT_ANDN_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl);
        break;
    case OPC2_32_BIT_NOR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nor_tl);
        break;
    case OPC2_32_BIT_OR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl);
        break;
    }
}

static void decode_bit_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;
    TCGv temp;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    temp = tcg_temp_new();

    tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2);
    if (op2 == OPC2_32_BIT_INSN_T) {
        tcg_gen_not_tl(temp, temp);
    }
    tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
    tcg_temp_free(temp);
}

static void decode_bit_logical_t2(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BIT_NAND_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nand_tl);
        break;
    case OPC2_32_BIT_ORN_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_orc_tl);
        break;
    case OPC2_32_BIT_XNOR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_eqv_tl);
        break;
    case OPC2_32_BIT_XOR_T:
        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_xor_tl);
        break;
    }
}

static void decode_bit_orand(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BIT_OR_AND_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl);
        break;
    case OPC2_32_BIT_OR_ANDN_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
        break;
    case OPC2_32_BIT_OR_NOR_T:
        if (TCG_TARGET_HAS_orc_i32) {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
        } else {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl);
        }
        break;
    case OPC2_32_BIT_OR_OR_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl);
        break;
    }
}

static void decode_bit_sh_logic1(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;
    TCGv temp;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_BIT_SH_AND_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl);
        break;
    case OPC2_32_BIT_SH_ANDN_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl);
        break;
    case OPC2_32_BIT_SH_NOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nor_tl);
        break;
    case OPC2_32_BIT_SH_OR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl);
        break;
    }
    tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
    tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
    tcg_temp_free(temp);
}

static void decode_bit_sh_logic2(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;
    TCGv temp;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_BIT_SH_NAND_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nand_tl);
        break;
    case OPC2_32_BIT_SH_ORN_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_orc_tl);
        break;
    case OPC2_32_BIT_SH_XNOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_eqv_tl);
        break;
    case OPC2_32_BIT_SH_XOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_xor_tl);
        break;
    }
    tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
    tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
    tcg_temp_free(temp);
}

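/*
 * BO format: base + offset addressing. Each load/store below comes in a
 * short-offset, a post-increment and a pre-increment variant; the
 * bit-reverse and circular variants update A[r2+1] after the access via
 * gen_helper_br_update()/gen_helper_circ_update().
 */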
static void decode_bo_addrmode_post_pre_base(CPUTriCoreState *env,
                                             DisasContext *ctx)
{
    uint32_t op2;
    int32_t off10;
    int32_t r1, r2;
    TCGv temp;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BO_CACHEA_WI_SHORTOFF:
    case OPC2_32_BO_CACHEA_W_SHORTOFF:
    case OPC2_32_BO_CACHEA_I_SHORTOFF:
        /* instruction to access the cache */
        break;
    case OPC2_32_BO_CACHEA_WI_POSTINC:
    case OPC2_32_BO_CACHEA_W_POSTINC:
    case OPC2_32_BO_CACHEA_I_POSTINC:
        /* instruction to access the cache, but we still need to handle
           the addressing mode */
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_CACHEA_WI_PREINC:
    case OPC2_32_BO_CACHEA_W_PREINC:
    case OPC2_32_BO_CACHEA_I_PREINC:
        /* instruction to access the cache, but we still need to handle
           the addressing mode */
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_CACHEI_WI_SHORTOFF:
    case OPC2_32_BO_CACHEI_W_SHORTOFF:
        /* TODO: Raise illegal opcode trap,
           if !tricore_feature(TRICORE_FEATURE_131) */
        break;
    case OPC2_32_BO_CACHEI_W_POSTINC:
    case OPC2_32_BO_CACHEI_WI_POSTINC:
        if (tricore_feature(env, TRICORE_FEATURE_131)) {
            tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        } /* TODO: else raise illegal opcode trap */
        break;
    case OPC2_32_BO_CACHEI_W_PREINC:
    case OPC2_32_BO_CACHEI_WI_PREINC:
        if (tricore_feature(env, TRICORE_FEATURE_131)) {
            tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        } /* TODO: else raise illegal opcode trap */
        break;
    case OPC2_32_BO_ST_A_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
        break;
    case OPC2_32_BO_ST_A_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LESL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_A_PREINC:
        gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
        break;
    case OPC2_32_BO_ST_B_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
        break;
    case OPC2_32_BO_ST_B_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_UB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_B_PREINC:
        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
        break;
    case OPC2_32_BO_ST_D_SHORTOFF:
        gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
                            off10, ctx);
        break;
    case OPC2_32_BO_ST_D_POSTINC:
        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_D_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_ST_DA_SHORTOFF:
        gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
                            off10, ctx);
        break;
    case OPC2_32_BO_ST_DA_POSTINC:
        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_DA_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_ST_H_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        break;
    case OPC2_32_BO_ST_H_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_H_PREINC:
        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        break;
    case OPC2_32_BO_ST_Q_SHORTOFF:
        temp = tcg_temp_new();
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_ST_Q_POSTINC:
        temp = tcg_temp_new();
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_ST_Q_PREINC:
        temp = tcg_temp_new();
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_ST_W_SHORTOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    case OPC2_32_BO_ST_W_POSTINC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_ST_W_PREINC:
        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    }
}

static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState *env,
                                                   DisasContext *ctx)
{
    uint32_t op2;
    int32_t off10;
    int32_t r1, r2;
    TCGv temp, temp2, temp3;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    temp3 = tcg_const_i32(off10);

    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);

    switch (op2) {
    case OPC2_32_BO_CACHEA_WI_BR:
    case OPC2_32_BO_CACHEA_W_BR:
    case OPC2_32_BO_CACHEA_I_BR:
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_CACHEA_WI_CIRC:
    case OPC2_32_BO_CACHEA_W_CIRC:
    case OPC2_32_BO_CACHEA_I_CIRC:
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_A_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_A_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_B_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_B_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_D_BR:
        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_D_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_DA_BR:
        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_DA_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_H_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_H_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_Q_BR:
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_Q_CIRC:
        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_ST_W_BR:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_ST_W_CIRC:
        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
}

static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState *env,
                                                DisasContext *ctx)
{
    uint32_t op2;
    int32_t off10;
    int32_t r1, r2;
    TCGv temp;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BO_LD_A_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    case OPC2_32_BO_LD_A_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_A_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    case OPC2_32_BO_LD_B_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
        break;
    case OPC2_32_BO_LD_B_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_SB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_B_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
        break;
    case OPC2_32_BO_LD_BU_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
        break;
    case OPC2_32_BO_LD_BU_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_UB);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_BU_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
        break;
    case OPC2_32_BO_LD_D_SHORTOFF:
        gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
                            off10, ctx);
        break;
    case OPC2_32_BO_LD_D_POSTINC:
        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_D_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_LD_DA_SHORTOFF:
        gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
                            off10, ctx);
        break;
    case OPC2_32_BO_LD_DA_POSTINC:
        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_DA_PREINC:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
    case OPC2_32_BO_LD_H_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
        break;
    case OPC2_32_BO_LD_H_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LESW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_H_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
        break;
    case OPC2_32_BO_LD_HU_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        break;
    case OPC2_32_BO_LD_HU_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_HU_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        break;
    case OPC2_32_BO_LD_Q_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        break;
    case OPC2_32_BO_LD_Q_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_Q_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        break;
    case OPC2_32_BO_LD_W_SHORTOFF:
        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    case OPC2_32_BO_LD_W_POSTINC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
                           MO_LEUL);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LD_W_PREINC:
        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
        break;
    }
}

static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState *env,
                                                      DisasContext *ctx)
{
    uint32_t op2;
    int32_t off10;
    int32_t r1, r2;
    TCGv temp, temp2, temp3;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    temp3 = tcg_const_i32(off10);

    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);

    switch (op2) {
    case OPC2_32_BO_LD_A_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_A_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_B_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_B_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_BU_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_BU_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_D_BR:
        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_D_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_DA_BR:
        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_DA_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
        tcg_gen_addi_tl(temp, temp, 4);
        tcg_gen_rem_tl(temp, temp, temp2);
        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_H_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_H_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_HU_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_HU_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_Q_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_Q_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_LD_W_BR:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LD_W_CIRC:
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
}

static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState *env,
                                                   DisasContext *ctx)
{
    uint32_t op2;
    int32_t off10;
    int32_t r1, r2;
    TCGv temp, temp2;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_BO_LDLCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_ldlcx(cpu_env, temp);
        break;
    case OPC2_32_BO_LDMST_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_ldmst(ctx, r1, temp);
        break;
    case OPC2_32_BO_LDMST_POSTINC:
        gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LDMST_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
        break;
    case OPC2_32_BO_LDUCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_lducx(cpu_env, temp);
        break;
    case OPC2_32_BO_LEA_SHORTOFF:
        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_STLCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_stlcx(cpu_env, temp);
        break;
    case OPC2_32_BO_STUCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_stucx(cpu_env, temp);
        break;
    case OPC2_32_BO_SWAP_W_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_swap(ctx, r1, temp);
        break;
    case OPC2_32_BO_SWAP_W_POSTINC:
        gen_swap(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_SWAP_W_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_swap(ctx, r1, cpu_gpr_a[r2]);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}

static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState *env,
                                                         DisasContext *ctx)
{
    uint32_t op2;
    int32_t off10;
    int32_t r1, r2;
    TCGv temp, temp2, temp3;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    temp3 = tcg_const_i32(off10);

    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);

    switch (op2) {
    case OPC2_32_BO_LDMST_BR:
        gen_ldmst(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LDMST_CIRC:
        gen_ldmst(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    case OPC2_32_BO_SWAP_W_BR:
        gen_swap(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_SWAP_W_CIRC:
        gen_swap(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
    tcg_temp_free(temp3);
}

static void decode_bol_opc(CPUTriCoreState *env, DisasContext *ctx, int32_t op1)
{
    int r1, r2;
    int32_t address;
    TCGv temp;

    r1 = MASK_OP_BOL_S1D(ctx->opcode);
    r2 = MASK_OP_BOL_S2(ctx->opcode);
    address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode);

    switch (op1) {
    case OPC1_32_BOL_LD_A_LONGOFF:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
        tcg_temp_free(temp);
        break;
    case OPC1_32_BOL_LD_W_LONGOFF:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
        tcg_temp_free(temp);
        break;
    case OPC1_32_BOL_LEA_LONGOFF:
        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
        break;
    case OPC1_32_BOL_ST_A_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], address, MO_LEUL);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_ST_W_LONGOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUL);
        break;
    case OPC1_32_BOL_LD_B_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_LD_BU_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_UB);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_LD_H_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_LD_HU_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUW);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_ST_B_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    case OPC1_32_BOL_ST_H_LONGOFF:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUW);
        } else {
            /* raise illegal opcode trap */
        }
        break;
    }
}

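/*
 * RC format: one source register D[r1], a 9-bit constant (sign-extended by
 * default, reloaded zero-extended for the unsigned opcodes) and a
 * destination register D[r2].
 */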
static void decode_rc_logical_shift(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2;
    int32_t const9;
    TCGv temp;

    r2 = MASK_OP_RC_D(ctx->opcode);
    r1 = MASK_OP_RC_S1(ctx->opcode);
    const9 = MASK_OP_RC_CONST9(ctx->opcode);
    op2 = MASK_OP_RC_OP2(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RC_AND:
        tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ANDN:
        tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
        break;
    case OPC2_32_RC_NAND:
        tcg_gen_movi_tl(temp, const9);
        tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_NOR:
        tcg_gen_movi_tl(temp, const9);
        tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_OR:
        tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ORN:
        tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
        break;
    case OPC2_32_RC_SH:
        const9 = sextract32(const9, 0, 6);
        gen_shi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_H:
        const9 = sextract32(const9, 0, 5);
        gen_sh_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SHA:
        const9 = sextract32(const9, 0, 6);
        gen_shaci(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SHA_H:
        const9 = sextract32(const9, 0, 5);
        gen_sha_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SHAS:
        gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_XNOR:
        tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RC_XOR:
        tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    }
    tcg_temp_free(temp);
}

static void decode_rc_accumulator(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2;
    int32_t const9;
    TCGv temp;

    r2 = MASK_OP_RC_D(ctx->opcode);
    r1 = MASK_OP_RC_S1(ctx->opcode);
    const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);

    op2 = MASK_OP_RC_OP2(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RC_ABSDIF:
        gen_absdifi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ABSDIFS:
        gen_absdifsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ADD:
        gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ADDC:
        gen_addci_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ADDS:
        gen_addsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ADDS_U:
        gen_addsui(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ADDX:
        gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_AND_EQ:
        gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_AND_GE:
        gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_AND_GE_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_AND_LT:
        gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_AND_LT_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_AND_NE:
        gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_and_tl);
        break;
    case OPC2_32_RC_EQ:
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_EQANY_B:
        gen_eqany_bi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_EQANY_H:
        gen_eqany_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_GE:
        tcg_gen_setcondi_tl(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_GE_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_LT:
        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_LT_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MAX:
        tcg_gen_movi_tl(temp, const9);
        tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
                           cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_MAX_U:
        tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
        tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
                           cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_MIN:
        tcg_gen_movi_tl(temp, const9);
        tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
                           cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_MIN_U:
        tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
        tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
                           cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_NE:
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_OR_EQ:
        gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_OR_GE:
        gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_OR_GE_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_OR_LT:
        gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_OR_LT_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_OR_NE:
        gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_or_tl);
        break;
    case OPC2_32_RC_RSUB:
        tcg_gen_movi_tl(temp, const9);
        gen_sub_d(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RC_RSUBS:
        tcg_gen_movi_tl(temp, const9);
        gen_subs(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RC_RSUBS_U:
        tcg_gen_movi_tl(temp, const9);
        gen_subsu(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RC_SH_EQ:
        gen_sh_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_GE:
        gen_sh_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_GE_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_sh_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_LT:
        gen_sh_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_LT_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_sh_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_NE:
        gen_sh_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_XOR_EQ:
        gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    case OPC2_32_RC_XOR_GE:
        gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    case OPC2_32_RC_XOR_GE_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    case OPC2_32_RC_XOR_LT:
        gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    case OPC2_32_RC_XOR_LT_U:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    case OPC2_32_RC_XOR_NE:
        gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
                               const9, &tcg_gen_xor_tl);
        break;
    }
    tcg_temp_free(temp);
}

static void decode_rc_serviceroutine(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t const9;

    op2 = MASK_OP_RC_OP2(ctx->opcode);
    const9 = MASK_OP_RC_CONST9(ctx->opcode);

    switch (op2) {
    case OPC2_32_RC_BISR:
        gen_helper_1arg(bisr, const9);
        break;
    case OPC2_32_RC_SYSCALL:
        /* TODO: Add exception generation */
        break;
    }
}

static void decode_rc_mul(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2;
    int32_t const9;

    r2 = MASK_OP_RC_D(ctx->opcode);
    r1 = MASK_OP_RC_S1(ctx->opcode);
    const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);

    op2 = MASK_OP_RC_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_RC_MUL_32:
        gen_muli_i32s(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MUL_64:
        gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MULS_32:
        gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MUL_U_64:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MULS_U_32:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_mulsui_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    }
}

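/*
 * RCPW/RCRW format: insert/imask operations with a 4-bit constant as the
 * insert value; the bit position is taken from the opcode (RCPW) or at
 * run time from D[r4] (RCRW).
 */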
static void decode_rcpw_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2;
    int32_t pos, width, const4;
    TCGv temp;

    op2 = MASK_OP_RCPW_OP2(ctx->opcode);
    r1 = MASK_OP_RCPW_S1(ctx->opcode);
    r2 = MASK_OP_RCPW_D(ctx->opcode);
    const4 = MASK_OP_RCPW_CONST4(ctx->opcode);
    width = MASK_OP_RCPW_WIDTH(ctx->opcode);
    pos = MASK_OP_RCPW_POS(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCPW_IMASK:
        /* if pos + width > 31 undefined result */
        if (pos + width <= 31) {
            tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos);
            tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos));
        }
        break;
    case OPC2_32_RCPW_INSERT:
        /* if pos + width > 32 undefined result */
        if (pos + width <= 32) {
            temp = tcg_const_i32(const4);
            tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
            tcg_temp_free(temp);
        }
        break;
    }
}

static void decode_rcrw_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t width, const4;
    TCGv temp, temp2, temp3;

    op2 = MASK_OP_RCRW_OP2(ctx->opcode);
    r1 = MASK_OP_RCRW_S1(ctx->opcode);
    r3 = MASK_OP_RCRW_S3(ctx->opcode);
    r4 = MASK_OP_RCRW_D(ctx->opcode);
    width = MASK_OP_RCRW_WIDTH(ctx->opcode);
    const4 = MASK_OP_RCRW_CONST4(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RCRW_IMASK:
        tcg_gen_andi_tl(temp, cpu_gpr_d[r4], 0x1f);
        tcg_gen_movi_tl(temp2, (1 << width) - 1);
        tcg_gen_shl_tl(cpu_gpr_d[r3 + 1], temp2, temp);
        tcg_gen_movi_tl(temp2, const4);
        tcg_gen_shl_tl(cpu_gpr_d[r3], temp2, temp);
        break;
    case OPC2_32_RCRW_INSERT:
        temp3 = tcg_temp_new();

        tcg_gen_movi_tl(temp, width);
        tcg_gen_movi_tl(temp2, const4);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r4], 0x1f);
        gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp2, temp, temp3);

        tcg_temp_free(temp3);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}

static void decode_rcr_cond_select(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t const9;
    TCGv temp, temp2;

    op2 = MASK_OP_RCR_OP2(ctx->opcode);
    r1 = MASK_OP_RCR_S1(ctx->opcode);
    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
    r3 = MASK_OP_RCR_S3(ctx->opcode);
    r4 = MASK_OP_RCR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCR_CADD:
        gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
                      cpu_gpr_d[r4]);
        break;
    case OPC2_32_RCR_CADDN:
        gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
                      cpu_gpr_d[r4]);
        break;
    case OPC2_32_RCR_SEL:
        temp = tcg_const_i32(0);
        temp2 = tcg_const_i32(const9);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], temp2);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
    case OPC2_32_RCR_SELN:
        temp = tcg_const_i32(0);
        temp2 = tcg_const_i32(const9);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], temp2);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
    }
}

static void decode_rcr_madd(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t const9;

    op2 = MASK_OP_RCR_OP2(ctx->opcode);
    r1 = MASK_OP_RCR_S1(ctx->opcode);
    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
    r3 = MASK_OP_RCR_S3(ctx->opcode);
    r4 = MASK_OP_RCR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCR_MADD_32:
        gen_maddi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MADD_64:
        gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MADDS_32:
        gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MADDS_64:
        gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MADD_U_64:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MADDS_U_32:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_maddsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MADDS_U_64:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    }
}

static void decode_rcr_msub(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t const9;

    op2 = MASK_OP_RCR_OP2(ctx->opcode);
    r1 = MASK_OP_RCR_S1(ctx->opcode);
    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
    r3 = MASK_OP_RCR_S3(ctx->opcode);
    r4 = MASK_OP_RCR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCR_MSUB_32:
        gen_msubi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MSUB_64:
        gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MSUBS_32:
        gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MSUBS_64:
        gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MSUB_U_64:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MSUBS_U_32:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_msubsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MSUBS_U_64:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    }
}

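/*
 * RLC format: operations on a 16-bit constant, sign-extended by default
 * and reloaded zero-extended for MOV.U, MFCR and MTCR.
 */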
static void decode_rlc_opc(CPUTriCoreState *env, DisasContext *ctx,
                           uint32_t op1)
{
    int32_t const16;
    int r1, r2;

    const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode);
    r1 = MASK_OP_RLC_S1(ctx->opcode);
    r2 = MASK_OP_RLC_D(ctx->opcode);

    switch (op1) {
    case OPC1_32_RLC_ADDI:
        gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16);
        break;
    case OPC1_32_RLC_ADDIH:
        gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16);
        break;
    case OPC1_32_RLC_ADDIH_A:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
        break;
    case OPC1_32_RLC_MFCR:
        const16 = MASK_OP_RLC_CONST16(ctx->opcode);
        gen_mfcr(env, cpu_gpr_d[r2], const16);
        break;
    case OPC1_32_RLC_MOV:
        tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
        break;
    case OPC1_32_RLC_MOV_64:
        if (tricore_feature(env, TRICORE_FEATURE_16)) {
            if ((r2 & 0x1) != 0) {
                /* TODO: raise OPD trap */
            }
            tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
            tcg_gen_movi_tl(cpu_gpr_d[r2+1], const16 >> 15);
        } else {
            /* TODO: raise illegal opcode trap */
        }
        break;
    case OPC1_32_RLC_MOV_U:
        const16 = MASK_OP_RLC_CONST16(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
        break;
    case OPC1_32_RLC_MOV_H:
        tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16);
        break;
    case OPC1_32_RLC_MOVH_A:
        tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16);
        break;
    case OPC1_32_RLC_MTCR:
        const16 = MASK_OP_RLC_CONST16(ctx->opcode);
        gen_mtcr(env, ctx, cpu_gpr_d[r1], const16);
        break;
    }
}

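/*
 * RR format: register-register operations. op2 selects the operation;
 * r1/r2 are the sources (D or A registers depending on the decoder) and
 * r3 is the destination.
 */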
static void decode_rr_accumulator(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r3, r2, r1;

    r3 = MASK_OP_RR_D(ctx->opcode);
    r2 = MASK_OP_RR_S2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);
    op2 = MASK_OP_RR_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_ABS:
        gen_abs(cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ABS_B:
        gen_helper_abs_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ABS_H:
        gen_helper_abs_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ABSDIF:
        gen_absdif(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ABSDIF_B:
        gen_helper_absdif_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                            cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ABSDIF_H:
        gen_helper_absdif_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                            cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ABSDIFS:
        gen_helper_absdif_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ABSDIFS_H:
        gen_helper_absdif_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                                 cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ABSS:
        gen_helper_abs_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ABSS_H:
        gen_helper_abs_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ADD:
        gen_add_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ADD_B:
        gen_helper_add_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ADD_H:
        gen_helper_add_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ADDC:
        gen_addc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ADDS:
        gen_adds(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ADDS_H:
        gen_helper_add_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                              cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ADDS_HU:
        gen_helper_add_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                              cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ADDS_U:
        gen_helper_add_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                            cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ADDX:
        gen_add_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_AND_EQ:
        gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_and_tl);
        break;
    case OPC2_32_RR_AND_GE:
        gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_and_tl);
        break;
    case OPC2_32_RR_AND_GE_U:
        gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_and_tl);
        break;
    case OPC2_32_RR_AND_LT:
        gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_and_tl);
        break;
    case OPC2_32_RR_AND_LT_U:
        gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_and_tl);
        break;
    case OPC2_32_RR_AND_NE:
        gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_and_tl);
        break;
    case OPC2_32_RR_EQ:
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_EQ_B:
        gen_helper_eq_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_EQ_H:
        gen_helper_eq_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_EQ_W:
        gen_cond_w(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_EQANY_B:
        gen_helper_eqany_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_EQANY_H:
        gen_helper_eqany_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_GE:
        tcg_gen_setcond_tl(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_GE_U:
        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT:
        tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_U:
        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_B:
        gen_helper_lt_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_BU:
        gen_helper_lt_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_H:
        gen_helper_lt_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_HU:
        gen_helper_lt_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_W:
        gen_cond_w(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_LT_WU:
        gen_cond_w(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX:
        tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX_U:
        tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX_B:
        gen_helper_max_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX_BU:
        gen_helper_max_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX_H:
        gen_helper_max_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MAX_HU:
        gen_helper_max_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN:
        tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN_U:
        tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN_B:
        gen_helper_min_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN_BU:
        gen_helper_min_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN_H:
        gen_helper_min_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MIN_HU:
        gen_helper_min_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MOV:
        tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_NE:
        tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                           cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_OR_EQ:
        gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_OR_GE:
        gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_OR_GE_U:
        gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_OR_LT:
        gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_OR_LT_U:
        gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_OR_NE:
        gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_or_tl);
        break;
    case OPC2_32_RR_SAT_B:
        gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7f, -0x80);
        break;
    case OPC2_32_RR_SAT_BU:
        gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xff);
        break;
    case OPC2_32_RR_SAT_H:
        gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7fff, -0x8000);
        break;
    case OPC2_32_RR_SAT_HU:
        gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xffff);
        break;
    case OPC2_32_RR_SH_EQ:
        gen_sh_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_GE:
        gen_sh_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_GE_U:
        gen_sh_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_LT:
        gen_sh_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_LT_U:
        gen_sh_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_NE:
        gen_sh_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                    cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUB:
        gen_sub_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUB_B:
        gen_helper_sub_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUB_H:
        gen_helper_sub_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBC:
        gen_subc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBS:
        gen_subs(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBS_U:
        gen_subsu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBS_H:
        gen_helper_sub_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                              cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBS_HU:
        gen_helper_sub_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                              cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SUBX:
        gen_sub_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_XOR_EQ:
        gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    case OPC2_32_RR_XOR_GE:
        gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    case OPC2_32_RR_XOR_GE_U:
        gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    case OPC2_32_RR_XOR_LT:
        gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    case OPC2_32_RR_XOR_LT_U:
        gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    case OPC2_32_RR_XOR_NE:
        gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
                              cpu_gpr_d[r2], &tcg_gen_xor_tl);
        break;
    }
}

static void decode_rr_logical_shift(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r3, r2, r1;
    TCGv temp;

    r3 = MASK_OP_RR_D(ctx->opcode);
    r2 = MASK_OP_RR_S2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);

    temp = tcg_temp_new();
    op2 = MASK_OP_RR_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_AND:
        tcg_gen_and_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ANDN:
        tcg_gen_andc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_CLO:
        gen_helper_clo(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLO_H:
        gen_helper_clo_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLS:
        gen_helper_cls(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLS_H:
        gen_helper_cls_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLZ:
        gen_helper_clz(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLZ_H:
        gen_helper_clz_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_NAND:
        tcg_gen_nand_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_NOR:
        tcg_gen_nor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_OR:
        tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ORN:
        tcg_gen_orc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH:
        gen_helper_sh(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_H:
        gen_helper_sh_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SHA:
        gen_helper_sha(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SHA_H:
        gen_helper_sha_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SHAS:
        gen_shas(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_XNOR:
        tcg_gen_eqv_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_XOR:
        tcg_gen_xor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    }
    tcg_temp_free(temp);
}

static void decode_rr_address(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2, n;
    int r1, r2, r3;
    TCGv temp;

    op2 = MASK_OP_RR_OP2(ctx->opcode);
    r3 = MASK_OP_RR_D(ctx->opcode);
    r2 = MASK_OP_RR_S2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);
    n = MASK_OP_RR_N(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_ADD_A:
        tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_ADDSC_A:
        temp = tcg_temp_new();
        tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n);
        tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp);
        tcg_temp_free(temp);
        break;
    case OPC2_32_RR_ADDSC_AT:
        temp = tcg_temp_new();
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3);
        tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp);
        tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC);
        tcg_temp_free(temp);
        break;
    case OPC2_32_RR_EQ_A:
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_EQZ:
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
        break;
    case OPC2_32_RR_GE_A:
        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_LT_A:
        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_MOV_A:
        tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MOV_AA:
        tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_MOV_D:
        tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_NE_A:
        tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_NEZ_A:
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
        break;
    case OPC2_32_RR_SUB_A:
        tcg_gen_sub_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
        break;
    }
}
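/*
 * Illustration only, not part of the decoder: a host-side sketch of the
 * scaled-index address arithmetic generated for ADDSC.A and ADDSC.AT above.
 * Function names are hypothetical; the ">> 3" on a signed value assumes an
 * arithmetic shift, mirroring the tcg_gen_sari_tl() used in the decoder.
 */
static inline uint32_t example_addsc_a(uint32_t a_b, uint32_t d_a, unsigned n)
{
    /* A[c] = A[b] + (D[a] << n), with n in 0..3 */
    return a_b + (d_a << n);
}

static inline uint32_t example_addsc_at(uint32_t a_b, int32_t d_a)
{
    /* A[c] = (A[b] + (D[a] >> 3)) & ~3, matching the sari/add/andi sequence */
    return (uint32_t)(a_b + (d_a >> 3)) & 0xfffffffcu;
}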
static void decode_rr_idirect(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1;

    op2 = MASK_OP_RR_OP2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_JI:
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
        break;
    case OPC2_32_RR_JLI:
        tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
        break;
    case OPC2_32_RR_CALLI:
        gen_helper_1arg(call, ctx->next_pc);
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
        break;
    }
    tcg_gen_exit_tb(0);
    ctx->bstate = BS_BRANCH;
}
static void decode_rr_divide(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;

    TCGv temp, temp2, temp3;

    op2 = MASK_OP_RR_OP2(ctx->opcode);
    r3 = MASK_OP_RR_D(ctx->opcode);
    r2 = MASK_OP_RR_S2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_BMERGE:
        gen_helper_bmerge(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_BSPLIT:
        gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_DVINIT_B:
        gen_dvinit_b(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_DVINIT_BU:
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        temp3 = tcg_temp_new();

        tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 8);
        /* reset av */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        if (!tricore_feature(env, TRICORE_FEATURE_131)) {
            /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
            tcg_gen_neg_tl(temp, temp3);
            /* use cpu_PSW_AV to compare against 0 */
            tcg_gen_movcond_tl(TCG_COND_LT, temp, temp3, cpu_PSW_AV,
                               temp, temp3);
            tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]);
            tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV,
                               temp2, cpu_gpr_d[r2]);
            tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
        } else {
            /* overflow = (D[b] == 0) */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sv */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* write result */
        tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24);
        tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        tcg_temp_free(temp3);
        break;
    case OPC2_32_RR_DVINIT_H:
        gen_dvinit_h(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_DVINIT_HU:
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        temp3 = tcg_temp_new();

        tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 16);
        /* reset av */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        if (!tricore_feature(env, TRICORE_FEATURE_131)) {
            /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
            tcg_gen_neg_tl(temp, temp3);
            /* use cpu_PSW_AV to compare against 0 */
            tcg_gen_movcond_tl(TCG_COND_LT, temp, temp3, cpu_PSW_AV,
                               temp, temp3);
            tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]);
            tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV,
                               temp2, cpu_gpr_d[r2]);
            tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
        } else {
            /* overflow = (D[b] == 0) */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sv */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* write result */
        tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
        tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        tcg_temp_free(temp3);
        break;
    case OPC2_32_RR_DVINIT:
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        /* overflow = ((D[b] == 0) ||
                       ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff);
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0);
        tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sv */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* reset av */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        /* write result */
        tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        /* sign extend to high reg */
        tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31);
        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
    case OPC2_32_RR_DVINIT_U:
        /* overflow = (D[b] == 0) */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sv */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* reset av */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        /* write result */
        tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        /* zero extend to high reg */
        tcg_gen_movi_tl(cpu_gpr_d[r3+1], 0);
        break;
    case OPC2_32_RR_PARITY:
        gen_helper_parity(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_UNPACK:
        gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
        break;
    }
}
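/*
 * Illustration only, not part of the decoder: a host-side sketch of the
 * signed 32/32 DVINIT setup translated above. The register pair E[c] is
 * seeded with the sign-extended dividend and PSW.V is set for divide-by-zero
 * or the 0x80000000 / -1 case; the quotient is then produced by later DVSTEP
 * iterations. The function name is hypothetical.
 */
static inline void example_dvinit(int32_t d_a, int32_t d_b,
                                  uint32_t *e_low, uint32_t *e_high,
                                  int *overflow)
{
    *e_low  = (uint32_t)d_a;            /* D[c]   = dividend              */
    *e_high = (uint32_t)(d_a >> 31);    /* D[c+1] = sign extension        */
    /* same predicate as the setcond/and/or sequence above */
    *overflow = (d_b == 0) ||
                ((uint32_t)d_b == 0xffffffffu &&
                 (uint32_t)d_a == 0x80000000u);
}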
static void decode_rr1_mul(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;

    int r1, r2, r3;
    TCGv n;
    TCGv_i64 temp64;

    r1 = MASK_OP_RR1_S1(ctx->opcode);
    r2 = MASK_OP_RR1_S2(ctx->opcode);
    r3 = MASK_OP_RR1_D(ctx->opcode);
    n = tcg_const_i32(MASK_OP_RR1_N(ctx->opcode));
    op2 = MASK_OP_RR1_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR1_MUL_H_32_LL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MUL_H_32_LU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MUL_H_32_UL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MUL_H_32_UU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MULM_H_64_LL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MULM_H_64_LU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MULM_H_64_UL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MULM_H_64_UU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_temp_free_i64(temp64);
        break;
    case OPC2_32_RR1_MULR_H_16_LL:
        GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    case OPC2_32_RR1_MULR_H_16_LU:
        GEN_HELPER_LU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    case OPC2_32_RR1_MULR_H_16_UL:
        GEN_HELPER_UL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    case OPC2_32_RR1_MULR_H_16_UU:
        GEN_HELPER_UU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    }
    tcg_temp_free(n);
}
static void decode_rr1_mulq(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    uint32_t n;

    TCGv temp, temp2;

    r1 = MASK_OP_RR1_S1(ctx->opcode);
    r2 = MASK_OP_RR1_S2(ctx->opcode);
    r3 = MASK_OP_RR1_D(ctx->opcode);
    n  = MASK_OP_RR1_N(ctx->opcode);
    op2 = MASK_OP_RR1_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RR1_MUL_Q_32:
        gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2], n, 32);
        break;
    case OPC2_32_RR1_MUL_Q_64:
        gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                  n, 0);
        break;
    case OPC2_32_RR1_MUL_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
        break;
    case OPC2_32_RR1_MUL_Q_64_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
        break;
    case OPC2_32_RR1_MUL_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
        break;
    case OPC2_32_RR1_MUL_Q_64_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
        break;
    case OPC2_32_RR1_MUL_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RR1_MUL_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RR1_MULR_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RR1_MULR_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
static void decode_rr2_mul(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;

    op2 = MASK_OP_RR2_OP2(ctx->opcode);
    r1  = MASK_OP_RR2_S1(ctx->opcode);
    r2  = MASK_OP_RR2_S2(ctx->opcode);
    r3  = MASK_OP_RR2_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR2_MUL_32:
        gen_mul_i32s(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MUL_64:
        gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MULS_32:
        gen_helper_mul_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                            cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MUL_U_64:
        gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MULS_U_32:
        gen_helper_mul_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
                            cpu_gpr_d[r2]);
        break;
    }
}
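/*
 * Illustration only, not part of the decoder: a host-side sketch of the
 * widening multiply behind the MUL.64 case above, which places a 64-bit
 * signed product in the register pair D[c]/D[c+1]. The function name is
 * hypothetical.
 */
static inline void example_mul64(int32_t a, int32_t b,
                                 uint32_t *lo, uint32_t *hi)
{
    int64_t p = (int64_t)a * (int64_t)b;    /* signed widening multiply */
    *lo = (uint32_t)p;                      /* goes to D[c]   */
    *hi = (uint32_t)((uint64_t)p >> 32);    /* goes to D[c+1] */
}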
static void decode_rrpw_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int32_t pos, width;

    op2 = MASK_OP_RRPW_OP2(ctx->opcode);
    r1 = MASK_OP_RRPW_S1(ctx->opcode);
    r2 = MASK_OP_RRPW_S2(ctx->opcode);
    r3 = MASK_OP_RRPW_D(ctx->opcode);
    pos = MASK_OP_RRPW_POS(ctx->opcode);
    width = MASK_OP_RRPW_WIDTH(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRPW_EXTR:
        if (pos + width <= 31) {
            /* optimize special cases */
            if ((pos == 0) && (width == 8)) {
                tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
            } else if ((pos == 0) && (width == 16)) {
                tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
            } else {
                tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 32 - pos - width);
                tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width);
            }
        }
        break;
    case OPC2_32_RRPW_EXTR_U:
        if (width == 0) {
            tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
        } else {
            tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos);
            tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32-width));
        }
        break;
    case OPC2_32_RRPW_IMASK:
        if (pos + width <= 31) {
            tcg_gen_movi_tl(cpu_gpr_d[r3+1], ((1u << width) - 1) << pos);
            tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
        }
        break;
    case OPC2_32_RRPW_INSERT:
        if (pos + width <= 31) {
            tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                               pos, width);
        }
        break;
    }
}
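/*
 * Illustration only, not part of the decoder: host-side sketches of the
 * immediate-form bit-field operations translated above, assuming
 * 0 < width and pos + width <= 32. Function names are hypothetical.
 */
static inline int32_t example_extr(uint32_t d_a, unsigned pos, unsigned width)
{
    /* shift the field's MSB up to bit 31, then arithmetic-shift back down */
    return ((int32_t)(d_a << (32 - pos - width))) >> (32 - width);
}

static inline uint32_t example_extr_u(uint32_t d_a, unsigned pos,
                                      unsigned width)
{
    /* zero-extended field, matching the shri/andi pair above */
    return (d_a >> pos) & (~0u >> (32 - width));
}

static inline uint32_t example_insert(uint32_t d_a, uint32_t d_b,
                                      unsigned pos, unsigned width)
{
    /* same effect as tcg_gen_deposit_tl(dst, d_a, d_b, pos, width) */
    uint32_t mask = (~0u >> (32 - width)) << pos;
    return (d_a & ~mask) | ((d_b << pos) & mask);
}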
static void decode_rrr_cond_select(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    TCGv temp;

    op2 = MASK_OP_RRR_OP2(ctx->opcode);
    r1 = MASK_OP_RRR_S1(ctx->opcode);
    r2 = MASK_OP_RRR_S2(ctx->opcode);
    r3 = MASK_OP_RRR_S3(ctx->opcode);
    r4 = MASK_OP_RRR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR_CADD:
        gen_cond_add(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
                     cpu_gpr_d[r4], cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CADDN:
        gen_cond_add(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CSUB:
        gen_cond_sub(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CSUBN:
        gen_cond_sub(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_SEL:
        temp = tcg_const_i32(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], cpu_gpr_d[r2]);
        tcg_temp_free(temp);
        break;
    case OPC2_32_RRR_SELN:
        temp = tcg_const_i32(0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], cpu_gpr_d[r2]);
        tcg_temp_free(temp);
        break;
    }
}
static void decode_rrr_divide(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;

    int r1, r2, r3, r4;

    op2 = MASK_OP_RRR_OP2(ctx->opcode);
    r1 = MASK_OP_RRR_S1(ctx->opcode);
    r2 = MASK_OP_RRR_S2(ctx->opcode);
    r3 = MASK_OP_RRR_S3(ctx->opcode);
    r4 = MASK_OP_RRR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR_DVADJ:
        GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_DVSTEP:
        GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_DVSTEP_U:
        GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMAX:
        GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMAX_U:
        GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMIN:
        GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMIN_U:
        GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_PACK:
        gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3],
                        cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
        break;
    }
}
static void decode_rrr2_madd(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4;

    op2 = MASK_OP_RRR2_OP2(ctx->opcode);
    r1 = MASK_OP_RRR2_S1(ctx->opcode);
    r2 = MASK_OP_RRR2_S2(ctx->opcode);
    r3 = MASK_OP_RRR2_S3(ctx->opcode);
    r4 = MASK_OP_RRR2_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR2_MADD_32:
        gen_madd32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADD_64:
        gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_32:
        gen_helper_madd32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_64:
        gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADD_U_64:
        gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_U_32:
        gen_helper_madd32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_U_64:
        gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    }
}
static void decode_rrr2_msub(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4;

    op2 = MASK_OP_RRR2_OP2(ctx->opcode);
    r1 = MASK_OP_RRR2_S1(ctx->opcode);
    r2 = MASK_OP_RRR2_S2(ctx->opcode);
    r3 = MASK_OP_RRR2_S3(ctx->opcode);
    r4 = MASK_OP_RRR2_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR2_MSUB_32:
        gen_msub32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUB_64:
        gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_32:
        gen_helper_msub32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_64:
        gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUB_U_64:
        gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_U_32:
        gen_helper_msub32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_U_64:
        gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    }
}
6809 static void decode_rrr1_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
6812 uint32_t r1
, r2
, r3
, r4
, n
;
6814 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6815 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6816 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6817 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6818 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6819 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6822 case OPC2_32_RRR1_MADD_H_LL
:
6823 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6824 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6826 case OPC2_32_RRR1_MADD_H_LU
:
6827 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6828 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6830 case OPC2_32_RRR1_MADD_H_UL
:
6831 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6832 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6834 case OPC2_32_RRR1_MADD_H_UU
:
6835 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6836 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6838 case OPC2_32_RRR1_MADDS_H_LL
:
6839 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6840 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6842 case OPC2_32_RRR1_MADDS_H_LU
:
6843 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6844 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6846 case OPC2_32_RRR1_MADDS_H_UL
:
6847 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6848 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6850 case OPC2_32_RRR1_MADDS_H_UU
:
6851 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6852 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6854 case OPC2_32_RRR1_MADDM_H_LL
:
6855 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6856 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6858 case OPC2_32_RRR1_MADDM_H_LU
:
6859 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6860 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6862 case OPC2_32_RRR1_MADDM_H_UL
:
6863 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6864 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6866 case OPC2_32_RRR1_MADDM_H_UU
:
6867 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6868 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6870 case OPC2_32_RRR1_MADDMS_H_LL
:
6871 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6872 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6874 case OPC2_32_RRR1_MADDMS_H_LU
:
6875 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6876 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6878 case OPC2_32_RRR1_MADDMS_H_UL
:
6879 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6880 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6882 case OPC2_32_RRR1_MADDMS_H_UU
:
6883 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6884 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6886 case OPC2_32_RRR1_MADDR_H_LL
:
6887 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6888 cpu_gpr_d
[r2
], n
, MODE_LL
);
6890 case OPC2_32_RRR1_MADDR_H_LU
:
6891 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6892 cpu_gpr_d
[r2
], n
, MODE_LU
);
6894 case OPC2_32_RRR1_MADDR_H_UL
:
6895 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6896 cpu_gpr_d
[r2
], n
, MODE_UL
);
6898 case OPC2_32_RRR1_MADDR_H_UU
:
6899 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6900 cpu_gpr_d
[r2
], n
, MODE_UU
);
6902 case OPC2_32_RRR1_MADDRS_H_LL
:
6903 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6904 cpu_gpr_d
[r2
], n
, MODE_LL
);
6906 case OPC2_32_RRR1_MADDRS_H_LU
:
6907 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6908 cpu_gpr_d
[r2
], n
, MODE_LU
);
6910 case OPC2_32_RRR1_MADDRS_H_UL
:
6911 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6912 cpu_gpr_d
[r2
], n
, MODE_UL
);
6914 case OPC2_32_RRR1_MADDRS_H_UU
:
6915 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6916 cpu_gpr_d
[r2
], n
, MODE_UU
);
6921 static void decode_rrr1_maddq_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
6924 uint32_t r1
, r2
, r3
, r4
, n
;
6927 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6928 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6929 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6930 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6931 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6932 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6934 temp
= tcg_const_i32(n
);
6935 temp2
= tcg_temp_new();
6938 case OPC2_32_RRR1_MADD_Q_32
:
6939 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6940 cpu_gpr_d
[r2
], n
, 32, env
);
6942 case OPC2_32_RRR1_MADD_Q_64
:
6943 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6944 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6947 case OPC2_32_RRR1_MADD_Q_32_L
:
6948 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6949 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6952 case OPC2_32_RRR1_MADD_Q_64_L
:
6953 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6954 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6955 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6958 case OPC2_32_RRR1_MADD_Q_32_U
:
6959 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6960 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6963 case OPC2_32_RRR1_MADD_Q_64_U
:
6964 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6965 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6966 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
6969 case OPC2_32_RRR1_MADD_Q_32_LL
:
6970 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6971 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6972 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6974 case OPC2_32_RRR1_MADD_Q_64_LL
:
6975 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6976 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6977 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6978 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6980 case OPC2_32_RRR1_MADD_Q_32_UU
:
6981 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6982 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6983 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
6985 case OPC2_32_RRR1_MADD_Q_64_UU
:
6986 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6987 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6988 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6989 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
6991 case OPC2_32_RRR1_MADDS_Q_32
:
6992 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6993 cpu_gpr_d
[r2
], n
, 32);
6995 case OPC2_32_RRR1_MADDS_Q_64
:
6996 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6997 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7000 case OPC2_32_RRR1_MADDS_Q_32_L
:
7001 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7002 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7005 case OPC2_32_RRR1_MADDS_Q_64_L
:
7006 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7007 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7008 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7011 case OPC2_32_RRR1_MADDS_Q_32_U
:
7012 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7013 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7016 case OPC2_32_RRR1_MADDS_Q_64_U
:
7017 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7018 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7019 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7022 case OPC2_32_RRR1_MADDS_Q_32_LL
:
7023 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7024 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7025 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7027 case OPC2_32_RRR1_MADDS_Q_64_LL
:
7028 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7029 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7030 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7031 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7033 case OPC2_32_RRR1_MADDS_Q_32_UU
:
7034 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7035 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7036 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7038 case OPC2_32_RRR1_MADDS_Q_64_UU
:
7039 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7040 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7041 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7042 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7044 case OPC2_32_RRR1_MADDR_H_64_UL
:
7045 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7046 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7048 case OPC2_32_RRR1_MADDRS_H_64_UL
:
7049 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7050 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7052 case OPC2_32_RRR1_MADDR_Q_32_LL
:
7053 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7054 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7055 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7057 case OPC2_32_RRR1_MADDR_Q_32_UU
:
7058 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7059 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7060 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7062 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
7063 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7064 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7065 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7067 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
7068 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7069 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7070 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7073 tcg_temp_free(temp
);
7074 tcg_temp_free(temp2
);
7077 static void decode_rrr1_maddsu_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7080 uint32_t r1
, r2
, r3
, r4
, n
;
7082 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7083 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7084 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7085 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7086 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7087 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7090 case OPC2_32_RRR1_MADDSU_H_32_LL
:
7091 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7092 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7094 case OPC2_32_RRR1_MADDSU_H_32_LU
:
7095 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7096 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7098 case OPC2_32_RRR1_MADDSU_H_32_UL
:
7099 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7100 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7102 case OPC2_32_RRR1_MADDSU_H_32_UU
:
7103 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7104 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7106 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
7107 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7108 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7111 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
7112 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7113 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7116 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
7117 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7118 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7121 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
7122 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7123 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7126 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
7127 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7128 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7131 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
7132 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7133 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7136 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
7137 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7138 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7141 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
7142 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7143 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7146 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
7147 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7148 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7151 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
7152 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7153 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7156 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
7157 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7158 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7161 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
7162 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7163 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7166 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
7167 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7168 cpu_gpr_d
[r2
], n
, MODE_LL
);
7170 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
7171 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7172 cpu_gpr_d
[r2
], n
, MODE_LU
);
7174 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
7175 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7176 cpu_gpr_d
[r2
], n
, MODE_UL
);
7178 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
7179 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7180 cpu_gpr_d
[r2
], n
, MODE_UU
);
7182 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
7183 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7184 cpu_gpr_d
[r2
], n
, MODE_LL
);
7186 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
7187 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7188 cpu_gpr_d
[r2
], n
, MODE_LU
);
7190 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
7191 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7192 cpu_gpr_d
[r2
], n
, MODE_UL
);
7194 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
7195 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7196 cpu_gpr_d
[r2
], n
, MODE_UU
);
7201 static void decode_rrr1_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
7204 uint32_t r1
, r2
, r3
, r4
, n
;
7206 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7207 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7208 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7209 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7210 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7211 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7214 case OPC2_32_RRR1_MSUB_H_LL
:
7215 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7216 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7218 case OPC2_32_RRR1_MSUB_H_LU
:
7219 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7220 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7222 case OPC2_32_RRR1_MSUB_H_UL
:
7223 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7224 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7226 case OPC2_32_RRR1_MSUB_H_UU
:
7227 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7228 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7230 case OPC2_32_RRR1_MSUBS_H_LL
:
7231 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7232 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7234 case OPC2_32_RRR1_MSUBS_H_LU
:
7235 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7236 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7238 case OPC2_32_RRR1_MSUBS_H_UL
:
7239 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7240 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7242 case OPC2_32_RRR1_MSUBS_H_UU
:
7243 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7244 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7246 case OPC2_32_RRR1_MSUBM_H_LL
:
7247 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7248 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7250 case OPC2_32_RRR1_MSUBM_H_LU
:
7251 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7252 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7254 case OPC2_32_RRR1_MSUBM_H_UL
:
7255 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7256 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7258 case OPC2_32_RRR1_MSUBM_H_UU
:
7259 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7260 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7262 case OPC2_32_RRR1_MSUBMS_H_LL
:
7263 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7264 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7266 case OPC2_32_RRR1_MSUBMS_H_LU
:
7267 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7268 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7270 case OPC2_32_RRR1_MSUBMS_H_UL
:
7271 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7272 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7274 case OPC2_32_RRR1_MSUBMS_H_UU
:
7275 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7276 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7278 case OPC2_32_RRR1_MSUBR_H_LL
:
7279 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7280 cpu_gpr_d
[r2
], n
, MODE_LL
);
7282 case OPC2_32_RRR1_MSUBR_H_LU
:
7283 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7284 cpu_gpr_d
[r2
], n
, MODE_LU
);
7286 case OPC2_32_RRR1_MSUBR_H_UL
:
7287 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7288 cpu_gpr_d
[r2
], n
, MODE_UL
);
7290 case OPC2_32_RRR1_MSUBR_H_UU
:
7291 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7292 cpu_gpr_d
[r2
], n
, MODE_UU
);
7294 case OPC2_32_RRR1_MSUBRS_H_LL
:
7295 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7296 cpu_gpr_d
[r2
], n
, MODE_LL
);
7298 case OPC2_32_RRR1_MSUBRS_H_LU
:
7299 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7300 cpu_gpr_d
[r2
], n
, MODE_LU
);
7302 case OPC2_32_RRR1_MSUBRS_H_UL
:
7303 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7304 cpu_gpr_d
[r2
], n
, MODE_UL
);
7306 case OPC2_32_RRR1_MSUBRS_H_UU
:
7307 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7308 cpu_gpr_d
[r2
], n
, MODE_UU
);
7313 static void decode_rrr1_msubq_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7316 uint32_t r1
, r2
, r3
, r4
, n
;
7319 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7320 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7321 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7322 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7323 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7324 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7326 temp
= tcg_const_i32(n
);
7327 temp2
= tcg_temp_new();
7330 case OPC2_32_RRR1_MSUB_Q_32
:
7331 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7332 cpu_gpr_d
[r2
], n
, 32, env
);
7334 case OPC2_32_RRR1_MSUB_Q_64
:
7335 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7336 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7339 case OPC2_32_RRR1_MSUB_Q_32_L
:
7340 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7341 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7344 case OPC2_32_RRR1_MSUB_Q_64_L
:
7345 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7346 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7347 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7350 case OPC2_32_RRR1_MSUB_Q_32_U
:
7351 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7352 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7355 case OPC2_32_RRR1_MSUB_Q_64_U
:
7356 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7357 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7358 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7361 case OPC2_32_RRR1_MSUB_Q_32_LL
:
7362 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7363 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7364 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7366 case OPC2_32_RRR1_MSUB_Q_64_LL
:
7367 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7368 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7369 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7370 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7372 case OPC2_32_RRR1_MSUB_Q_32_UU
:
7373 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7374 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7375 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7377 case OPC2_32_RRR1_MSUB_Q_64_UU
:
7378 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7379 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7380 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7381 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7383 case OPC2_32_RRR1_MSUBS_Q_32
:
7384 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7385 cpu_gpr_d
[r2
], n
, 32);
7387 case OPC2_32_RRR1_MSUBS_Q_64
:
7388 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7389 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7392 case OPC2_32_RRR1_MSUBS_Q_32_L
:
7393 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7394 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7397 case OPC2_32_RRR1_MSUBS_Q_64_L
:
7398 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7399 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7400 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7403 case OPC2_32_RRR1_MSUBS_Q_32_U
:
7404 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7405 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7408 case OPC2_32_RRR1_MSUBS_Q_64_U
:
7409 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7410 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7411 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7414 case OPC2_32_RRR1_MSUBS_Q_32_LL
:
7415 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7416 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7417 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7419 case OPC2_32_RRR1_MSUBS_Q_64_LL
:
7420 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7421 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7422 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7423 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7425 case OPC2_32_RRR1_MSUBS_Q_32_UU
:
7426 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7427 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7428 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7430 case OPC2_32_RRR1_MSUBS_Q_64_UU
:
7431 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7432 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7433 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7434 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7436 case OPC2_32_RRR1_MSUBR_H_64_UL
:
7437 gen_msubr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7438 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7440 case OPC2_32_RRR1_MSUBRS_H_64_UL
:
7441 gen_msubr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7442 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7444 case OPC2_32_RRR1_MSUBR_Q_32_LL
:
7445 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7446 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7447 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7449 case OPC2_32_RRR1_MSUBR_Q_32_UU
:
7450 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7451 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7452 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7454 case OPC2_32_RRR1_MSUBRS_Q_32_LL
:
7455 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7456 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7457 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7459 case OPC2_32_RRR1_MSUBRS_Q_32_UU
:
7460 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7461 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7462 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7465 tcg_temp_free(temp
);
7466 tcg_temp_free(temp2
);
7469 static void decode_rrr1_msubad_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7472 uint32_t r1
, r2
, r3
, r4
, n
;
7474 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7475 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7476 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7477 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7478 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7479 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7482 case OPC2_32_RRR1_MSUBAD_H_32_LL
:
7483 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7484 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7486 case OPC2_32_RRR1_MSUBAD_H_32_LU
:
7487 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7488 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7490 case OPC2_32_RRR1_MSUBAD_H_32_UL
:
7491 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7492 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7494 case OPC2_32_RRR1_MSUBAD_H_32_UU
:
7495 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7496 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7498 case OPC2_32_RRR1_MSUBADS_H_32_LL
:
7499 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7500 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7503 case OPC2_32_RRR1_MSUBADS_H_32_LU
:
7504 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7505 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7508 case OPC2_32_RRR1_MSUBADS_H_32_UL
:
7509 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7510 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7513 case OPC2_32_RRR1_MSUBADS_H_32_UU
:
7514 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7515 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7518 case OPC2_32_RRR1_MSUBADM_H_64_LL
:
7519 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7520 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7523 case OPC2_32_RRR1_MSUBADM_H_64_LU
:
7524 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7525 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7528 case OPC2_32_RRR1_MSUBADM_H_64_UL
:
7529 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7530 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7533 case OPC2_32_RRR1_MSUBADM_H_64_UU
:
7534 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7535 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7538 case OPC2_32_RRR1_MSUBADMS_H_64_LL
:
7539 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7540 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7543 case OPC2_32_RRR1_MSUBADMS_H_64_LU
:
7544 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7545 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7548 case OPC2_32_RRR1_MSUBADMS_H_64_UL
:
7549 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7550 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7553 case OPC2_32_RRR1_MSUBADMS_H_64_UU
:
7554 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7555 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7558 case OPC2_32_RRR1_MSUBADR_H_16_LL
:
7559 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7560 cpu_gpr_d
[r2
], n
, MODE_LL
);
7562 case OPC2_32_RRR1_MSUBADR_H_16_LU
:
7563 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7564 cpu_gpr_d
[r2
], n
, MODE_LU
);
7566 case OPC2_32_RRR1_MSUBADR_H_16_UL
:
7567 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7568 cpu_gpr_d
[r2
], n
, MODE_UL
);
7570 case OPC2_32_RRR1_MSUBADR_H_16_UU
:
7571 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7572 cpu_gpr_d
[r2
], n
, MODE_UU
);
7574 case OPC2_32_RRR1_MSUBADRS_H_16_LL
:
7575 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7576 cpu_gpr_d
[r2
], n
, MODE_LL
);
7578 case OPC2_32_RRR1_MSUBADRS_H_16_LU
:
7579 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7580 cpu_gpr_d
[r2
], n
, MODE_LU
);
7582 case OPC2_32_RRR1_MSUBADRS_H_16_UL
:
7583 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7584 cpu_gpr_d
[r2
], n
, MODE_UL
);
7586 case OPC2_32_RRR1_MSUBADRS_H_16_UU
:
7587 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7588 cpu_gpr_d
[r2
], n
, MODE_UU
);
static void decode_rrrr_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    TCGv tmp_width, tmp_pos;

    r1 = MASK_OP_RRRR_S1(ctx->opcode);
    r2 = MASK_OP_RRRR_S2(ctx->opcode);
    r3 = MASK_OP_RRRR_S3(ctx->opcode);
    r4 = MASK_OP_RRRR_D(ctx->opcode);
    op2 = MASK_OP_RRRR_OP2(ctx->opcode);

    tmp_pos = tcg_temp_new();
    tmp_width = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RRRR_DEXTR:
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        if (r1 == r2) {
            tcg_gen_rotl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
        } else {
            tcg_gen_shl_tl(tmp_width, cpu_gpr_d[r1], tmp_pos);
            tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
            tcg_gen_shr_tl(tmp_pos, cpu_gpr_d[r2], tmp_pos);
            tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, tmp_pos);
        }
        break;
    case OPC2_32_RRRR_EXTR:
    case OPC2_32_RRRR_EXTR_U:
        tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        tcg_gen_add_tl(tmp_pos, tmp_pos, tmp_width);
        tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
        tcg_gen_subfi_tl(tmp_width, 32, tmp_width);
        if (op2 == OPC2_32_RRRR_EXTR) {
            tcg_gen_sar_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
        } else {
            tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
        }
        break;
    case OPC2_32_RRRR_INSERT:
        tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], tmp_width,
                   tmp_pos);
        break;
    }
    tcg_temp_free(tmp_pos);
    tcg_temp_free(tmp_width);
}
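/*
 * Illustration only, not part of the decoder: a host-side sketch of DEXTR,
 * which extracts 32 bits from the concatenation {D[a], D[b]} starting at a
 * variable bit position, matching the shl/shr/or sequence above. When both
 * sources are the same register this reduces to a left rotate, which is the
 * special case handled above. The function name is hypothetical.
 */
static inline uint32_t example_dextr(uint32_t d_a, uint32_t d_b, unsigned pos)
{
    pos &= 0x1f;
    if (pos == 0) {
        return d_a;                     /* avoid an undefined 32-bit shift */
    }
    /* top (32 - pos) bits come from D[a], the remaining pos bits from D[b] */
    return (d_a << pos) | (d_b >> (32 - pos));
}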
static void decode_rrrw_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    int32_t width;

    TCGv temp, temp2;

    op2 = MASK_OP_RRRW_OP2(ctx->opcode);
    r1 = MASK_OP_RRRW_S1(ctx->opcode);
    r2 = MASK_OP_RRRW_S2(ctx->opcode);
    r3 = MASK_OP_RRRW_S3(ctx->opcode);
    r4 = MASK_OP_RRRW_D(ctx->opcode);
    width = MASK_OP_RRRW_WIDTH(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RRRW_EXTR:
        tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
        tcg_gen_addi_tl(temp, temp, width);
        tcg_gen_subfi_tl(temp, 32, temp);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
        tcg_gen_sari_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width);
        break;
    case OPC2_32_RRRW_EXTR_U:
        if (width == 0) {
            tcg_gen_movi_tl(cpu_gpr_d[r4], 0);
        } else {
            tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
            tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
            tcg_gen_andi_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32-width));
        }
        break;
    case OPC2_32_RRRW_IMASK:
        temp2 = tcg_temp_new();

        tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
        tcg_gen_movi_tl(temp2, (1 << width) - 1);
        tcg_gen_shl_tl(temp2, temp2, temp);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp);
        tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2);

        tcg_temp_free(temp2);
        break;
    case OPC2_32_RRRW_INSERT:
        temp2 = tcg_temp_new();

        tcg_gen_movi_tl(temp, width);
        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f);
        gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2);

        tcg_temp_free(temp2);
        break;
    }
    tcg_temp_free(temp);
}
static void decode_sys_interrupts(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    TCGv tmp;
    int l1;

    op2 = MASK_OP_SYS_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_SYS_DEBUG:
        /* raise EXCP_DEBUG */
        break;
    case OPC2_32_SYS_DISABLE:
        tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~MASK_ICR_IE);
        break;
    case OPC2_32_SYS_DSYNC:
        break;
    case OPC2_32_SYS_ENABLE:
        tcg_gen_ori_tl(cpu_ICR, cpu_ICR, MASK_ICR_IE);
        break;
    case OPC2_32_SYS_ISYNC:
        break;
    case OPC2_32_SYS_NOP:
        break;
    case OPC2_32_SYS_RET:
        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
        break;
    case OPC2_32_SYS_RFE:
        gen_helper_rfe(cpu_env);
        tcg_gen_exit_tb(0);
        ctx->bstate = BS_BRANCH;
        break;
    case OPC2_32_SYS_RFM:
        if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
            tmp = tcg_temp_new();
            l1 = gen_new_label();

            tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR));
            tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
            tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
            gen_helper_rfm(cpu_env);
            gen_set_label(l1);
            tcg_gen_exit_tb(0);
            ctx->bstate = BS_BRANCH;
            tcg_temp_free(tmp);
        } else {
            /* generate privilege trap */
        }
        break;
    case OPC2_32_SYS_RSLCX:
        gen_helper_rslcx(cpu_env);
        break;
    case OPC2_32_SYS_SVLCX:
        gen_helper_svlcx(cpu_env);
        break;
    case OPC2_32_SYS_TRAPSV:
        /* TODO: raise sticky overflow trap */
        break;
    case OPC2_32_SYS_TRAPV:
        /* TODO: raise overflow trap */
        break;
    }
}
7770 static void decode_32Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
7774 int32_t address
, const16
;
7777 TCGv temp
, temp2
, temp3
;
7779 op1
= MASK_OP_MAJOR(ctx
->opcode
);
7781 /* handle JNZ.T opcode only being 7 bit long */
7782 if (unlikely((op1
& 0x7f) == OPCM_32_BRN_JTT
)) {
7783 op1
= OPCM_32_BRN_JTT
;
7788 case OPCM_32_ABS_LDW
:
7789 decode_abs_ldw(env
, ctx
);
7791 case OPCM_32_ABS_LDB
:
7792 decode_abs_ldb(env
, ctx
);
7794 case OPCM_32_ABS_LDMST_SWAP
:
7795 decode_abs_ldst_swap(env
, ctx
);
7797 case OPCM_32_ABS_LDST_CONTEXT
:
7798 decode_abs_ldst_context(env
, ctx
);
7800 case OPCM_32_ABS_STORE
:
7801 decode_abs_store(env
, ctx
);
7803 case OPCM_32_ABS_STOREB_H
:
7804 decode_abs_storeb_h(env
, ctx
);
7806 case OPC1_32_ABS_STOREQ
:
7807 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7808 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
7809 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
7810 temp2
= tcg_temp_new();
7812 tcg_gen_shri_tl(temp2
, cpu_gpr_d
[r1
], 16);
7813 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_LEUW
);
7815 tcg_temp_free(temp2
);
7816 tcg_temp_free(temp
);
7818 case OPC1_32_ABS_LD_Q
:
7819 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7820 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
7821 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
7823 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
7824 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
7826 tcg_temp_free(temp
);
7828 case OPC1_32_ABS_LEA
:
7829 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7830 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
7831 tcg_gen_movi_tl(cpu_gpr_a
[r1
], EA_ABS_FORMAT(address
));
7834 case OPC1_32_ABSB_ST_T
:
7835 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7836 b
= MASK_OP_ABSB_B(ctx
->opcode
);
7837 bpos
= MASK_OP_ABSB_BPOS(ctx
->opcode
);
7839 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
7840 temp2
= tcg_temp_new();
7842 tcg_gen_qemu_ld_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
7843 tcg_gen_andi_tl(temp2
, temp2
, ~(0x1u
<< bpos
));
7844 tcg_gen_ori_tl(temp2
, temp2
, (b
<< bpos
));
7845 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
7847 tcg_temp_free(temp
);
7848 tcg_temp_free(temp2
);
7851 case OPC1_32_B_CALL
:
7852 case OPC1_32_B_CALLA
:
7857 address
= MASK_OP_B_DISP24_SEXT(ctx
->opcode
);
7858 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
    case OPCM_32_BIT_ANDACC:
        decode_bit_andacc(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T1:
        decode_bit_logical_t(env, ctx);
        break;
    case OPCM_32_BIT_INSERT:
        decode_bit_insert(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T2:
        decode_bit_logical_t2(env, ctx);
        break;
    case OPCM_32_BIT_ORAND:
        decode_bit_orand(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC1:
        decode_bit_sh_logic1(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC2:
        decode_bit_sh_logic2(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
        decode_bo_addrmode_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
        decode_bo_addrmode_ld_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ld_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
        decode_bo_addrmode_stctx_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx);
        break;
    case OPC1_32_BOL_LD_A_LONGOFF:
    case OPC1_32_BOL_LD_W_LONGOFF:
    case OPC1_32_BOL_LEA_LONGOFF:
    case OPC1_32_BOL_ST_W_LONGOFF:
    case OPC1_32_BOL_ST_A_LONGOFF:
    case OPC1_32_BOL_LD_B_LONGOFF:
    case OPC1_32_BOL_LD_BU_LONGOFF:
    case OPC1_32_BOL_LD_H_LONGOFF:
    case OPC1_32_BOL_LD_HU_LONGOFF:
    case OPC1_32_BOL_ST_B_LONGOFF:
    case OPC1_32_BOL_ST_H_LONGOFF:
        decode_bol_opc(env, ctx, op1);
        break;
    case OPCM_32_BRC_EQ_NEQ:
    case OPCM_32_BRC_GE:
    case OPCM_32_BRC_JLT:
    case OPCM_32_BRC_JNE:
        const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
        address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRC_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, const4, address);
        break;
    case OPCM_32_BRN_JTT:
        address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRN_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, address);
        break;
    case OPCM_32_BRR_EQ_NEQ:
    case OPCM_32_BRR_ADDR_EQ_NEQ:
    case OPCM_32_BRR_GE:
    case OPCM_32_BRR_JLT:
    case OPCM_32_BRR_JNE:
    case OPCM_32_BRR_JNZ:
    case OPCM_32_BRR_LOOP:
        address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
        r2 = MASK_OP_BRR_S2(ctx->opcode);
        r1 = MASK_OP_BRR_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, r2, 0, address);
        break;
    case OPCM_32_RC_LOGICAL_SHIFT:
        decode_rc_logical_shift(env, ctx);
        break;
    case OPCM_32_RC_ACCUMULATOR:
        decode_rc_accumulator(env, ctx);
        break;
    case OPCM_32_RC_SERVICEROUTINE:
        decode_rc_serviceroutine(env, ctx);
        break;
    case OPCM_32_RC_MUL:
        decode_rc_mul(env, ctx);
        break;
    case OPCM_32_RCPW_MASK_INSERT:
        decode_rcpw_insert(env, ctx);
        break;
    case OPC1_32_RCRR_INSERT:
        r1 = MASK_OP_RCRR_S1(ctx->opcode);
        r2 = MASK_OP_RCRR_S3(ctx->opcode);
        r3 = MASK_OP_RCRR_D(ctx->opcode);
        const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
        temp = tcg_const_i32(const16);
        temp2 = tcg_temp_new(); /* width */
        temp3 = tcg_temp_new(); /* pos */

        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);

        gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        tcg_temp_free(temp3);
        break;
    case OPCM_32_RCRW_MASK_INSERT:
        decode_rcrw_insert(env, ctx);
        break;
    case OPCM_32_RCR_COND_SELECT:
        decode_rcr_cond_select(env, ctx);
        break;
    case OPCM_32_RCR_MADD:
        decode_rcr_madd(env, ctx);
        break;
    case OPCM_32_RCR_MSUB:
        decode_rcr_msub(env, ctx);
        break;
    case OPC1_32_RLC_ADDI:
    case OPC1_32_RLC_ADDIH:
    case OPC1_32_RLC_ADDIH_A:
    case OPC1_32_RLC_MFCR:
    case OPC1_32_RLC_MOV:
    case OPC1_32_RLC_MOV_64:
    case OPC1_32_RLC_MOV_U:
    case OPC1_32_RLC_MOV_H:
    case OPC1_32_RLC_MOVH_A:
    case OPC1_32_RLC_MTCR:
        decode_rlc_opc(env, ctx, op1);
        break;
    case OPCM_32_RR_ACCUMULATOR:
        decode_rr_accumulator(env, ctx);
        break;
    case OPCM_32_RR_LOGICAL_SHIFT:
        decode_rr_logical_shift(env, ctx);
        break;
    case OPCM_32_RR_ADDRESS:
        decode_rr_address(env, ctx);
        break;
    case OPCM_32_RR_IDIRECT:
        decode_rr_idirect(env, ctx);
        break;
    case OPCM_32_RR_DIVIDE:
        decode_rr_divide(env, ctx);
        break;
    case OPCM_32_RR1_MUL:
        decode_rr1_mul(env, ctx);
        break;
    case OPCM_32_RR1_MULQ:
        decode_rr1_mulq(env, ctx);
        break;
    case OPCM_32_RR2_MUL:
        decode_rr2_mul(env, ctx);
        break;
    case OPCM_32_RRPW_EXTRACT_INSERT:
        decode_rrpw_extract_insert(env, ctx);
        break;
    case OPC1_32_RRPW_DEXTR:
        r1 = MASK_OP_RRPW_S1(ctx->opcode);
        r2 = MASK_OP_RRPW_S2(ctx->opcode);
        r3 = MASK_OP_RRPW_D(ctx->opcode);
        const16 = MASK_OP_RRPW_POS(ctx->opcode);
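        /*
         * DEXTR extracts 32 bits from the 64-bit concatenation D[r1]:D[r2],
         * i.e. (D[r1] << pos) | (D[r2] >> (32 - pos)). When both source
         * registers are identical this degenerates to a plain left rotation
         * by pos, which is what the fast path below emits.
         */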
        if (r1 == r2) {
            tcg_gen_rotli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], const16);
        } else {
            temp = tcg_temp_new();
            tcg_gen_shli_tl(temp, cpu_gpr_d[r1], const16);
            tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], 32 - const16);
            tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
            tcg_temp_free(temp);
        }
        break;
    case OPCM_32_RRR_COND_SELECT:
        decode_rrr_cond_select(env, ctx);
        break;
    case OPCM_32_RRR_DIVIDE:
        decode_rrr_divide(env, ctx);
        break;
    case OPCM_32_RRR2_MADD:
        decode_rrr2_madd(env, ctx);
        break;
    case OPCM_32_RRR2_MSUB:
        decode_rrr2_msub(env, ctx);
        break;
    case OPCM_32_RRR1_MADD:
        decode_rrr1_madd(env, ctx);
        break;
    case OPCM_32_RRR1_MADDQ_H:
        decode_rrr1_maddq_h(env, ctx);
        break;
    case OPCM_32_RRR1_MADDSU_H:
        decode_rrr1_maddsu_h(env, ctx);
        break;
    case OPCM_32_RRR1_MSUB_H:
        decode_rrr1_msub(env, ctx);
        break;
    case OPCM_32_RRR1_MSUB_Q:
        decode_rrr1_msubq_h(env, ctx);
        break;
    case OPCM_32_RRR1_MSUBAD_H:
        decode_rrr1_msubad_h(env, ctx);
        break;
    case OPCM_32_RRRR_EXTRACT_INSERT:
        decode_rrrr_extract_insert(env, ctx);
        break;
    case OPCM_32_RRRW_EXTRACT_INSERT:
        decode_rrrw_extract_insert(env, ctx);
        break;
    case OPCM_32_SYS_INTERRUPTS:
        decode_sys_interrupts(env, ctx);
        break;
    case OPC1_32_SYS_RSTV:
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
        break;
    }
}
static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
{
    /* 16-Bit Instruction */
    if ((ctx->opcode & 0x1) == 0) {
        ctx->next_pc = ctx->pc + 2;
        decode_16Bit_opc(env, ctx);
    /* 32-Bit Instruction */
    } else {
        ctx->next_pc = ctx->pc + 4;
        decode_32Bit_opc(env, ctx);
    }
}
static inline void
gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
                               int search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUTriCoreState *env = &cpu->env;
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;

    if (search_pc) {
        qemu_log("search pc %d\n", search_pc);
    }

    num_insns = 0;
    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.saved_pc = -1;
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.bstate = BS_NONE;
    ctx.mem_idx = cpu_mmu_index(env);

    tcg_clear_temp_count();
    gen_tb_start(tb);
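    /*
     * Translate one guest instruction per iteration until the decoder marks
     * the end of the block (a branch sets ctx.bstate), the TCG op buffer
     * runs full, or single-stepping forces an early exit; in the latter two
     * cases the address of the next instruction is saved first so execution
     * can resume there.
     */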
    while (ctx.bstate == BS_NONE) {
        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx, 0);

        num_insns++;

        if (tcg_op_buf_full()) {
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        if (singlestep) {
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        ctx.pc = ctx.next_pc;
    }

    gen_tb_end(tb, num_insns);
    if (search_pc) {
        printf("done_generating search pc\n");
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
    if (tcg_check_temp_count()) {
        printf("LEAK at %08x\n", env->PC);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
    }
}
void gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos)
{
    env->PC = tcg_ctx.gen_opc_pc[pc_pos];
}
void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
}
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
                                  offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, ICR), "ICR");
}
void tricore_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0 ; i < 16 ; i++) {
        cpu_gpr_a[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUTriCoreState, gpr_a[i]),
                                          regnames_a[i]);
    }
    for (i = 0 ; i < 16 ; i++) {
        cpu_gpr_d[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUTriCoreState, gpr_d[i]),
                                          regnames_d[i]);
    }
    tricore_tcg_init_csfr();
    /* init PSW flag cache */
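    /*
     * The PSW status bits C/V/SV/AV/SAV are mirrored in dedicated TCG
     * globals so that arithmetic code can update individual flags without
     * re-reading and re-packing the full PSW register; the PSW read/write
     * helpers are expected to fold these fields back in.
     */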
    cpu_PSW_C = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUTriCoreState, PSW_USB_C),
                                   "PSW_C");
    cpu_PSW_V = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUTriCoreState, PSW_USB_V),
                                   "PSW_V");
    cpu_PSW_SV = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUTriCoreState, PSW_USB_SV),
                                    "PSW_SV");
    cpu_PSW_AV = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUTriCoreState, PSW_USB_AV),
                                    "PSW_AV");
    cpu_PSW_SAV = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUTriCoreState, PSW_USB_SAV),
                                     "PSW_SAV");
}