2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
/* Global TCG values, allocated once at translator init and referenced by
   all generated code.  */
static TCGv cpu_gpr_a[16];   /* address registers A[0..15] (A[10] = SP) */
static TCGv cpu_gpr_d[16];   /* data registers D[0..15] */
/* Cached PSW status flags; kept as individual TCG globals (rather than
   re-reading the architectural PSW) so generated code can update them
   cheaply after every arithmetic op.  */
static TCGv cpu_PSW_C;       /* carry */
static TCGv cpu_PSW_V;       /* overflow (bit 31 used as the flag) */
static TCGv cpu_PSW_SV;      /* sticky overflow */
static TCGv cpu_PSW_AV;      /* advance overflow */
static TCGv cpu_PSW_SAV;     /* sticky advance overflow */
/* Pointer to CPUTriCoreState, passed to helpers.  */
static TCGv_ptr cpu_env;
50 #include "exec/gen-icount.h"
/* Debug names for the 16 address registers; A[10] is printed as "sp"
   since it serves as the stack pointer.
   (Restores the array terminator dropped from this excerpt.) */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };
/* Debug names for the 16 data registers.
   (Restores the array terminator dropped from this excerpt.) */
static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
/* Per-translation decoder state, threaded through all gen_* functions.
   NOTE(review): several members of the upstream struct (the current opcode,
   mem_idx, the branch state and the closing "} DisasContext;") are not
   visible in this excerpt — confirm against the full file. */
typedef struct DisasContext {
    struct TranslationBlock *tb;            /* TB currently being translated */
    target_ulong pc, saved_pc, next_pc;     /* current / saved / next insn PC */
    int singlestep_enabled;                 /* gdbstub single-step flag */
    /* Routine used to access memory */
    uint32_t hflags, saved_hflags;          /* cached tb->flags state */
/* Dump architectural state (PC, PSW, ICR, PCXI, FCX, LCX and all 32 GPRs)
   to stream f, for the QEMU monitor "info registers" / -d cpu logging.
   NOTE(review): the opening brace, the declarations of the locals `psw`
   (presumably psw_read(env)) and `i`, and the loop-closing braces are not
   visible in this excerpt — confirm against the full file. */
void tricore_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
    TriCoreCPU *cpu = TRICORE_CPU(cs);
    CPUTriCoreState *env = &cpu->env;

    /* special-function registers first */
    cpu_fprintf(f, "PC: " TARGET_FMT_lx, env->PC);
    cpu_fprintf(f, " PSW: " TARGET_FMT_lx, psw);
    cpu_fprintf(f, " ICR: " TARGET_FMT_lx, env->ICR);
    cpu_fprintf(f, "\nPCXI: " TARGET_FMT_lx, env->PCXI);
    cpu_fprintf(f, " FCX: " TARGET_FMT_lx, env->FCX);
    cpu_fprintf(f, " LCX: " TARGET_FMT_lx, env->LCX);

    /* address registers */
    for (i = 0; i < 16; ++i) {
        cpu_fprintf(f, "\nGPR A%02d:", i);
        cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_a[i]);
    /* data registers */
    for (i = 0; i < 16; ++i) {
        cpu_fprintf(f, "\nGPR D%02d:", i);
        cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_d[i]);
    cpu_fprintf(f, "\n");
/*
 * Functions to generate micro-ops
 */

/* Macros for generating helpers */

/* Call helper `name` with cpu_env and one immediate argument.
   (Restores the "} while (0)" terminator dropped from this excerpt.) */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)
/* Call a packed-halfword helper with both operands' LOW halves.
   arg00 = arg0 high half, arg01 = arg0 low half (sign-extended); the low
   half of arg1 (arg11) is passed twice so the helper multiplies low*low
   in both lanes.  (Restores the "} while (0)" terminator.) */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* Call a packed-halfword helper in LU mode: arg0 split high/low, arg1's
   low half (arg10) and high half (arg11) passed separately.
   (Restores the "} while (0)" terminator.) */
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* Call a packed-halfword helper in UL mode: arg1's high half (arg10) is
   passed before its low half (arg11) — the mirror of GEN_HELPER_LU.
   (Restores the "} while (0)" terminator.) */
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* Call a packed-halfword helper with both operands' HIGH halves: note
   arg00/arg01 are swapped relative to GEN_HELPER_LL, and arg1's high half
   (arg11) is passed twice.  (Restores the "} while (0)" terminator.) */
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* Call a helper taking a 64-bit register pair (al1:ah1) plus arg2 and
   returning a 64-bit result unpacked into rl/rh.
   (Restores the "} while (0)" terminator.) */
#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {    \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
    TCGv_i64 arg1 = tcg_temp_new_i64();                      \
                                                             \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                  \
    gen_helper_##name(ret, arg1, arg2);                      \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
                                                             \
    tcg_temp_free_i64(ret);                                  \
    tcg_temp_free_i64(arg1);                                 \
} while (0)
/* Effective-address decoders for the ABS and B instruction formats. */
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
/* NOTE(review): this macro ignores its parameter and expands against a
   local variable named `offset` at each use site — verify callers always
   declare `offset`. */
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
208 /* Functions for load/save to/from memory */
210 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
211 int16_t con
, TCGMemOp mop
)
213 TCGv temp
= tcg_temp_new();
214 tcg_gen_addi_tl(temp
, r2
, con
);
215 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
219 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
220 int16_t con
, TCGMemOp mop
)
222 TCGv temp
= tcg_temp_new();
223 tcg_gen_addi_tl(temp
, r2
, con
);
224 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
228 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
230 TCGv_i64 temp
= tcg_temp_new_i64();
232 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
233 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
235 tcg_temp_free_i64(temp
);
238 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
241 TCGv temp
= tcg_temp_new();
242 tcg_gen_addi_tl(temp
, base
, con
);
243 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
247 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
249 TCGv_i64 temp
= tcg_temp_new_i64();
251 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
252 /* write back to two 32 bit regs */
253 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
255 tcg_temp_free_i64(temp
);
258 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
261 TCGv temp
= tcg_temp_new();
262 tcg_gen_addi_tl(temp
, base
, con
);
263 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
267 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
270 TCGv temp
= tcg_temp_new();
271 tcg_gen_addi_tl(temp
, r2
, off
);
272 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
273 tcg_gen_mov_tl(r2
, temp
);
277 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
280 TCGv temp
= tcg_temp_new();
281 tcg_gen_addi_tl(temp
, r2
, off
);
282 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
283 tcg_gen_mov_tl(r2
, temp
);
287 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
288 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
290 TCGv temp
= tcg_temp_new();
291 TCGv temp2
= tcg_temp_new();
293 /* temp = (M(EA, word) */
294 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
295 /* temp = temp & ~E[a][63:32]) */
296 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
297 /* temp2 = (E[a][31:0] & E[a][63:32]); */
298 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
299 /* temp = temp | temp2; */
300 tcg_gen_or_tl(temp
, temp
, temp2
);
301 /* M(EA, word) = temp; */
302 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
305 tcg_temp_free(temp2
);
308 /* tmp = M(EA, word);
311 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
313 TCGv temp
= tcg_temp_new();
315 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
316 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
317 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
322 static void gen_cmpswap(DisasContext
*ctx
, int reg
, TCGv ea
)
324 TCGv temp
= tcg_temp_new();
325 TCGv temp2
= tcg_temp_new();
326 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
327 tcg_gen_movcond_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[reg
+1], temp
,
328 cpu_gpr_d
[reg
], temp
);
329 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
330 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
333 tcg_temp_free(temp2
);
336 static void gen_swapmsk(DisasContext
*ctx
, int reg
, TCGv ea
)
338 TCGv temp
= tcg_temp_new();
339 TCGv temp2
= tcg_temp_new();
340 TCGv temp3
= tcg_temp_new();
342 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
343 tcg_gen_and_tl(temp2
, cpu_gpr_d
[reg
], cpu_gpr_d
[reg
+1]);
344 tcg_gen_andc_tl(temp3
, temp
, cpu_gpr_d
[reg
+1]);
345 tcg_gen_or_tl(temp2
, temp2
, temp3
);
346 tcg_gen_qemu_st_tl(temp2
, ea
, ctx
->mem_idx
, MO_LEUL
);
347 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
350 tcg_temp_free(temp2
);
351 tcg_temp_free(temp3
);
/* We generate loads and store to core special function register (csfr) through
   the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
   macros R, A and E, which allow read-only, all and endinit protected access.
   These macros also specify in which ISA version the csfr was introduced.
   NOTE(review): the upstream macros wrap these bodies in "case ADDRESS: ...
   break;" and gen_mfcr/gen_mtcr dispatch through a switch that includes
   csfr.def; those lines are not visible in this excerpt — confirm against
   the full file. */
#define R(ADDRESS, REG, FEATURE)                                         \
        if (tricore_feature(env, FEATURE)) {                             \
            tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
/* Read CSFR at `offset` into ret. */
static inline void gen_mfcr(CPUTriCoreState *env, TCGv ret, int32_t offset)
    /* since we're caching PSW make this a special case */
    if (offset == 0xfe04) {
        gen_helper_psw_read(ret, cpu_env);
#define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
                                    since no exception occurs */
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)               \
        if (tricore_feature(env, FEATURE)) {                             \
            tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
/* Endinit protected registers
   TODO: Since the endinit bit is in a register of a not yet implemented
   watchdog device, we handle endinit protected registers like
   all-access registers for now. */
#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
/* Write r1 into a CSFR; only allowed in supervisor mode (KUU == SM).
   NOTE(review): the "int32_t offset)" parameter line and the else branch
   are not visible in this excerpt. */
static inline void gen_mtcr(CPUTriCoreState *env, DisasContext *ctx, TCGv r1,
    if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
        /* since we're caching PSW make this a special case */
        if (offset == 0xfe04) {
            gen_helper_psw_write(cpu_env, r1);
        /* generate privilege trap */
412 /* Functions for arithmetic instructions */
414 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
416 TCGv t0
= tcg_temp_new_i32();
417 TCGv result
= tcg_temp_new_i32();
418 /* Addition and set V/SV bits */
419 tcg_gen_add_tl(result
, r1
, r2
);
421 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
422 tcg_gen_xor_tl(t0
, r1
, r2
);
423 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
425 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
426 /* Calc AV/SAV bits */
427 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
428 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
430 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
431 /* write back result */
432 tcg_gen_mov_tl(ret
, result
);
434 tcg_temp_free(result
);
439 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
441 TCGv temp
= tcg_temp_new();
442 TCGv_i64 t0
= tcg_temp_new_i64();
443 TCGv_i64 t1
= tcg_temp_new_i64();
444 TCGv_i64 result
= tcg_temp_new_i64();
446 tcg_gen_add_i64(result
, r1
, r2
);
448 tcg_gen_xor_i64(t1
, result
, r1
);
449 tcg_gen_xor_i64(t0
, r1
, r2
);
450 tcg_gen_andc_i64(t1
, t1
, t0
);
451 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t1
, 32);
453 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
454 /* calc AV/SAV bits */
455 tcg_gen_trunc_shr_i64_i32(temp
, result
, 32);
456 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
457 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
459 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
460 /* write back result */
461 tcg_gen_mov_i64(ret
, result
);
464 tcg_temp_free_i64(result
);
465 tcg_temp_free_i64(t0
);
466 tcg_temp_free_i64(t1
);
470 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
471 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
472 void(*op2
)(TCGv
, TCGv
, TCGv
))
474 TCGv temp
= tcg_temp_new();
475 TCGv temp2
= tcg_temp_new();
476 TCGv temp3
= tcg_temp_new();
477 TCGv temp4
= tcg_temp_new();
479 (*op1
)(temp
, r1_low
, r2
);
481 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
482 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
483 if (op1
== tcg_gen_add_tl
) {
484 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
486 tcg_gen_and_tl(temp2
, temp2
, temp3
);
489 (*op2
)(temp3
, r1_high
, r3
);
491 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
492 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
493 if (op2
== tcg_gen_add_tl
) {
494 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
496 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
498 /* combine V0/V1 bits */
499 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
501 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
503 tcg_gen_mov_tl(ret_low
, temp
);
504 tcg_gen_mov_tl(ret_high
, temp3
);
506 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
507 tcg_gen_xor_tl(temp
, temp
, ret_low
);
508 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
509 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
510 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
512 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
515 tcg_temp_free(temp2
);
516 tcg_temp_free(temp3
);
517 tcg_temp_free(temp4
);
520 /* ret = r2 + (r1 * r3); */
521 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
523 TCGv_i64 t1
= tcg_temp_new_i64();
524 TCGv_i64 t2
= tcg_temp_new_i64();
525 TCGv_i64 t3
= tcg_temp_new_i64();
527 tcg_gen_ext_i32_i64(t1
, r1
);
528 tcg_gen_ext_i32_i64(t2
, r2
);
529 tcg_gen_ext_i32_i64(t3
, r3
);
531 tcg_gen_mul_i64(t1
, t1
, t3
);
532 tcg_gen_add_i64(t1
, t2
, t1
);
534 tcg_gen_trunc_i64_i32(ret
, t1
);
537 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
538 /* t1 < -0x80000000 */
539 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
540 tcg_gen_or_i64(t2
, t2
, t3
);
541 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
542 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
544 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
545 /* Calc AV/SAV bits */
546 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
547 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
549 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
551 tcg_temp_free_i64(t1
);
552 tcg_temp_free_i64(t2
);
553 tcg_temp_free_i64(t3
);
556 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
558 TCGv temp
= tcg_const_i32(con
);
559 gen_madd32_d(ret
, r1
, r2
, temp
);
564 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
567 TCGv t1
= tcg_temp_new();
568 TCGv t2
= tcg_temp_new();
569 TCGv t3
= tcg_temp_new();
570 TCGv t4
= tcg_temp_new();
572 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
573 /* only the add can overflow */
574 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
576 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
577 tcg_gen_xor_tl(t1
, r2_high
, t2
);
578 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
580 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
581 /* Calc AV/SAV bits */
582 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
583 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
585 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
586 /* write back the result */
587 tcg_gen_mov_tl(ret_low
, t3
);
588 tcg_gen_mov_tl(ret_high
, t4
);
597 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
600 TCGv_i64 t1
= tcg_temp_new_i64();
601 TCGv_i64 t2
= tcg_temp_new_i64();
602 TCGv_i64 t3
= tcg_temp_new_i64();
604 tcg_gen_extu_i32_i64(t1
, r1
);
605 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
606 tcg_gen_extu_i32_i64(t3
, r3
);
608 tcg_gen_mul_i64(t1
, t1
, t3
);
609 tcg_gen_add_i64(t2
, t2
, t1
);
610 /* write back result */
611 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
612 /* only the add overflows, if t2 < t1
614 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
615 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
616 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
618 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
619 /* Calc AV/SAV bits */
620 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
621 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
623 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
625 tcg_temp_free_i64(t1
);
626 tcg_temp_free_i64(t2
);
627 tcg_temp_free_i64(t3
);
631 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
634 TCGv temp
= tcg_const_i32(con
);
635 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
640 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
643 TCGv temp
= tcg_const_i32(con
);
644 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
649 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
650 TCGv r3
, uint32_t n
, uint32_t mode
)
652 TCGv temp
= tcg_const_i32(n
);
653 TCGv temp2
= tcg_temp_new();
654 TCGv_i64 temp64
= tcg_temp_new_i64();
657 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
660 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
663 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
666 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
669 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
670 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
671 tcg_gen_add_tl
, tcg_gen_add_tl
);
673 tcg_temp_free(temp2
);
674 tcg_temp_free_i64(temp64
);
678 gen_maddsu_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
679 TCGv r3
, uint32_t n
, uint32_t mode
)
681 TCGv temp
= tcg_const_i32(n
);
682 TCGv temp2
= tcg_temp_new();
683 TCGv_i64 temp64
= tcg_temp_new_i64();
686 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
689 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
692 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
695 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
698 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
699 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
700 tcg_gen_sub_tl
, tcg_gen_add_tl
);
702 tcg_temp_free(temp2
);
703 tcg_temp_free_i64(temp64
);
707 gen_maddsum_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
708 TCGv r3
, uint32_t n
, uint32_t mode
)
710 TCGv temp
= tcg_const_i32(n
);
711 TCGv_i64 temp64
= tcg_temp_new_i64();
712 TCGv_i64 temp64_2
= tcg_temp_new_i64();
713 TCGv_i64 temp64_3
= tcg_temp_new_i64();
716 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
719 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
722 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
725 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
728 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
729 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
730 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
731 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
732 tcg_gen_shli_i64(temp64
, temp64
, 16);
734 gen_add64_d(temp64_2
, temp64_3
, temp64
);
735 /* write back result */
736 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
739 tcg_temp_free_i64(temp64
);
740 tcg_temp_free_i64(temp64_2
);
741 tcg_temp_free_i64(temp64_3
);
/* Forward declaration: saturating add (defined later in the file), used
   by the gen_madds* generators below. */
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);
747 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
748 TCGv r3
, uint32_t n
, uint32_t mode
)
750 TCGv temp
= tcg_const_i32(n
);
751 TCGv temp2
= tcg_temp_new();
752 TCGv temp3
= tcg_temp_new();
753 TCGv_i64 temp64
= tcg_temp_new_i64();
757 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
760 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
763 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
766 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
769 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
770 gen_adds(ret_low
, r1_low
, temp
);
771 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
772 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
773 gen_adds(ret_high
, r1_high
, temp2
);
775 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
776 /* combine av bits */
777 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
780 tcg_temp_free(temp2
);
781 tcg_temp_free(temp3
);
782 tcg_temp_free_i64(temp64
);
/* Forward declaration: saturating subtract (defined later in the file),
   used by the gen_maddsus* generators below. */
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);
789 gen_maddsus_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
790 TCGv r3
, uint32_t n
, uint32_t mode
)
792 TCGv temp
= tcg_const_i32(n
);
793 TCGv temp2
= tcg_temp_new();
794 TCGv temp3
= tcg_temp_new();
795 TCGv_i64 temp64
= tcg_temp_new_i64();
799 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
802 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
805 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
808 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
811 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
812 gen_subs(ret_low
, r1_low
, temp
);
813 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
814 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
815 gen_adds(ret_high
, r1_high
, temp2
);
817 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
818 /* combine av bits */
819 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
822 tcg_temp_free(temp2
);
823 tcg_temp_free(temp3
);
824 tcg_temp_free_i64(temp64
);
829 gen_maddsums_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
830 TCGv r3
, uint32_t n
, uint32_t mode
)
832 TCGv temp
= tcg_const_i32(n
);
833 TCGv_i64 temp64
= tcg_temp_new_i64();
834 TCGv_i64 temp64_2
= tcg_temp_new_i64();
838 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
841 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
844 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
847 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
850 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
851 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
852 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
853 tcg_gen_shli_i64(temp64
, temp64
, 16);
854 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
856 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
857 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
860 tcg_temp_free_i64(temp64
);
861 tcg_temp_free_i64(temp64_2
);
866 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
867 TCGv r3
, uint32_t n
, uint32_t mode
)
869 TCGv temp
= tcg_const_i32(n
);
870 TCGv_i64 temp64
= tcg_temp_new_i64();
871 TCGv_i64 temp64_2
= tcg_temp_new_i64();
872 TCGv_i64 temp64_3
= tcg_temp_new_i64();
875 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
878 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
881 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
884 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
887 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
888 gen_add64_d(temp64_3
, temp64_2
, temp64
);
889 /* write back result */
890 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
893 tcg_temp_free_i64(temp64
);
894 tcg_temp_free_i64(temp64_2
);
895 tcg_temp_free_i64(temp64_3
);
899 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
900 TCGv r3
, uint32_t n
, uint32_t mode
)
902 TCGv temp
= tcg_const_i32(n
);
903 TCGv_i64 temp64
= tcg_temp_new_i64();
904 TCGv_i64 temp64_2
= tcg_temp_new_i64();
907 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
910 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
913 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
916 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
919 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
920 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
921 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
924 tcg_temp_free_i64(temp64
);
925 tcg_temp_free_i64(temp64_2
);
929 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
932 TCGv temp
= tcg_const_i32(n
);
933 TCGv_i64 temp64
= tcg_temp_new_i64();
936 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
939 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
942 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
945 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
948 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
951 tcg_temp_free_i64(temp64
);
955 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
957 TCGv temp
= tcg_temp_new();
958 TCGv temp2
= tcg_temp_new();
960 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
961 tcg_gen_shli_tl(temp
, r1
, 16);
962 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
965 tcg_temp_free(temp2
);
969 gen_maddsur32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
971 TCGv temp
= tcg_const_i32(n
);
972 TCGv temp2
= tcg_temp_new();
973 TCGv_i64 temp64
= tcg_temp_new_i64();
976 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
979 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
982 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
985 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
988 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
989 tcg_gen_shli_tl(temp
, r1
, 16);
990 gen_helper_addsur_h(ret
, cpu_env
, temp64
, temp
, temp2
);
993 tcg_temp_free(temp2
);
994 tcg_temp_free_i64(temp64
);
999 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1000 uint32_t n
, uint32_t mode
)
1002 TCGv temp
= tcg_const_i32(n
);
1003 TCGv_i64 temp64
= tcg_temp_new_i64();
1006 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1009 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1012 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1015 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1018 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1020 tcg_temp_free(temp
);
1021 tcg_temp_free_i64(temp64
);
1025 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1027 TCGv temp
= tcg_temp_new();
1028 TCGv temp2
= tcg_temp_new();
1030 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1031 tcg_gen_shli_tl(temp
, r1
, 16);
1032 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1034 tcg_temp_free(temp
);
1035 tcg_temp_free(temp2
);
1039 gen_maddsur32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1041 TCGv temp
= tcg_const_i32(n
);
1042 TCGv temp2
= tcg_temp_new();
1043 TCGv_i64 temp64
= tcg_temp_new_i64();
1046 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1049 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1052 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1055 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1058 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1059 tcg_gen_shli_tl(temp
, r1
, 16);
1060 gen_helper_addsur_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
1062 tcg_temp_free(temp
);
1063 tcg_temp_free(temp2
);
1064 tcg_temp_free_i64(temp64
);
1068 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1070 TCGv temp
= tcg_const_i32(n
);
1071 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1072 tcg_temp_free(temp
);
1076 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1078 TCGv temp
= tcg_const_i32(n
);
1079 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1080 tcg_temp_free(temp
);
1084 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1085 uint32_t up_shift
, CPUTriCoreState
*env
)
1087 TCGv temp
= tcg_temp_new();
1088 TCGv temp2
= tcg_temp_new();
1089 TCGv temp3
= tcg_temp_new();
1090 TCGv_i64 t1
= tcg_temp_new_i64();
1091 TCGv_i64 t2
= tcg_temp_new_i64();
1092 TCGv_i64 t3
= tcg_temp_new_i64();
1094 tcg_gen_ext_i32_i64(t2
, arg2
);
1095 tcg_gen_ext_i32_i64(t3
, arg3
);
1097 tcg_gen_mul_i64(t2
, t2
, t3
);
1098 tcg_gen_shli_i64(t2
, t2
, n
);
1100 tcg_gen_ext_i32_i64(t1
, arg1
);
1101 tcg_gen_sari_i64(t2
, t2
, up_shift
);
1103 tcg_gen_add_i64(t3
, t1
, t2
);
1104 tcg_gen_trunc_i64_i32(temp3
, t3
);
1106 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1107 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1108 tcg_gen_or_i64(t1
, t1
, t2
);
1109 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
1110 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1111 /* We produce an overflow on the host if the mul before was
1112 (0x80000000 * 0x80000000) << 1). If this is the
1113 case, we negate the ovf. */
1115 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1116 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1117 tcg_gen_and_tl(temp
, temp
, temp2
);
1118 tcg_gen_shli_tl(temp
, temp
, 31);
1119 /* negate v bit, if special condition */
1120 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1123 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1124 /* Calc AV/SAV bits */
1125 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1126 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1128 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1129 /* write back result */
1130 tcg_gen_mov_tl(ret
, temp3
);
1132 tcg_temp_free(temp
);
1133 tcg_temp_free(temp2
);
1134 tcg_temp_free(temp3
);
1135 tcg_temp_free_i64(t1
);
1136 tcg_temp_free_i64(t2
);
1137 tcg_temp_free_i64(t3
);
1141 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1143 TCGv temp
= tcg_temp_new();
1144 TCGv temp2
= tcg_temp_new();
1146 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1147 } else { /* n is expected to be 1 */
1148 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1149 tcg_gen_shli_tl(temp
, temp
, 1);
1150 /* catch special case r1 = r2 = 0x8000 */
1151 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1152 tcg_gen_sub_tl(temp
, temp
, temp2
);
1154 gen_add_d(ret
, arg1
, temp
);
1156 tcg_temp_free(temp
);
1157 tcg_temp_free(temp2
);
1161 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
1163 TCGv temp
= tcg_temp_new();
1164 TCGv temp2
= tcg_temp_new();
1166 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1167 } else { /* n is expected to be 1 */
1168 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1169 tcg_gen_shli_tl(temp
, temp
, 1);
1170 /* catch special case r1 = r2 = 0x8000 */
1171 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1172 tcg_gen_sub_tl(temp
, temp
, temp2
);
1174 gen_adds(ret
, arg1
, temp
);
1176 tcg_temp_free(temp
);
1177 tcg_temp_free(temp2
);
1181 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1182 TCGv arg3
, uint32_t n
)
1184 TCGv temp
= tcg_temp_new();
1185 TCGv temp2
= tcg_temp_new();
1186 TCGv_i64 t1
= tcg_temp_new_i64();
1187 TCGv_i64 t2
= tcg_temp_new_i64();
1188 TCGv_i64 t3
= tcg_temp_new_i64();
1191 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1192 } else { /* n is expected to be 1 */
1193 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1194 tcg_gen_shli_tl(temp
, temp
, 1);
1195 /* catch special case r1 = r2 = 0x8000 */
1196 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1197 tcg_gen_sub_tl(temp
, temp
, temp2
);
1199 tcg_gen_ext_i32_i64(t2
, temp
);
1200 tcg_gen_shli_i64(t2
, t2
, 16);
1201 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1202 gen_add64_d(t3
, t1
, t2
);
1203 /* write back result */
1204 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
1206 tcg_temp_free_i64(t1
);
1207 tcg_temp_free_i64(t2
);
1208 tcg_temp_free_i64(t3
);
1209 tcg_temp_free(temp
);
1210 tcg_temp_free(temp2
);
1214 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1215 TCGv arg3
, uint32_t n
)
1217 TCGv temp
= tcg_temp_new();
1218 TCGv temp2
= tcg_temp_new();
1219 TCGv_i64 t1
= tcg_temp_new_i64();
1220 TCGv_i64 t2
= tcg_temp_new_i64();
1223 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1224 } else { /* n is expected to be 1 */
1225 tcg_gen_mul_tl(temp
, arg2
, arg3
);
1226 tcg_gen_shli_tl(temp
, temp
, 1);
1227 /* catch special case r1 = r2 = 0x8000 */
1228 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
1229 tcg_gen_sub_tl(temp
, temp
, temp2
);
1231 tcg_gen_ext_i32_i64(t2
, temp
);
1232 tcg_gen_shli_i64(t2
, t2
, 16);
1233 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1235 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
1236 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1238 tcg_temp_free(temp
);
1239 tcg_temp_free(temp2
);
1240 tcg_temp_free_i64(t1
);
1241 tcg_temp_free_i64(t2
);
1245 gen_madd64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1246 TCGv arg3
, uint32_t n
, CPUTriCoreState
*env
)
1248 TCGv_i64 t1
= tcg_temp_new_i64();
1249 TCGv_i64 t2
= tcg_temp_new_i64();
1250 TCGv_i64 t3
= tcg_temp_new_i64();
1251 TCGv_i64 t4
= tcg_temp_new_i64();
1254 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1255 tcg_gen_ext_i32_i64(t2
, arg2
);
1256 tcg_gen_ext_i32_i64(t3
, arg3
);
1258 tcg_gen_mul_i64(t2
, t2
, t3
);
1260 tcg_gen_shli_i64(t2
, t2
, 1);
1262 tcg_gen_add_i64(t4
, t1
, t2
);
1264 tcg_gen_xor_i64(t3
, t4
, t1
);
1265 tcg_gen_xor_i64(t2
, t1
, t2
);
1266 tcg_gen_andc_i64(t3
, t3
, t2
);
1267 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t3
, 32);
1268 /* We produce an overflow on the host if the mul before was
1269 (0x80000000 * 0x80000000) << 1). If this is the
1270 case, we negate the ovf. */
1272 temp
= tcg_temp_new();
1273 temp2
= tcg_temp_new();
1274 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1275 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1276 tcg_gen_and_tl(temp
, temp
, temp2
);
1277 tcg_gen_shli_tl(temp
, temp
, 31);
1278 /* negate v bit, if special condition */
1279 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1281 tcg_temp_free(temp
);
1282 tcg_temp_free(temp2
);
1284 /* write back result */
1285 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1287 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1288 /* Calc AV/SAV bits */
1289 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1290 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1292 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1294 tcg_temp_free_i64(t1
);
1295 tcg_temp_free_i64(t2
);
1296 tcg_temp_free_i64(t3
);
1297 tcg_temp_free_i64(t4
);
1301 gen_madds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1304 TCGv_i64 t1
= tcg_temp_new_i64();
1305 TCGv_i64 t2
= tcg_temp_new_i64();
1306 TCGv_i64 t3
= tcg_temp_new_i64();
1308 tcg_gen_ext_i32_i64(t1
, arg1
);
1309 tcg_gen_ext_i32_i64(t2
, arg2
);
1310 tcg_gen_ext_i32_i64(t3
, arg3
);
1312 tcg_gen_mul_i64(t2
, t2
, t3
);
1313 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1315 gen_helper_madd32_q_add_ssov(ret
, cpu_env
, t1
, t2
);
1317 tcg_temp_free_i64(t1
);
1318 tcg_temp_free_i64(t2
);
1319 tcg_temp_free_i64(t3
);
1323 gen_madds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1324 TCGv arg3
, uint32_t n
)
1326 TCGv_i64 r1
= tcg_temp_new_i64();
1327 TCGv temp
= tcg_const_i32(n
);
1329 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1330 gen_helper_madd64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
1331 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1333 tcg_temp_free_i64(r1
);
1334 tcg_temp_free(temp
);
1336 /* ret = r2 - (r1 * r3); */
1337 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
1339 TCGv_i64 t1
= tcg_temp_new_i64();
1340 TCGv_i64 t2
= tcg_temp_new_i64();
1341 TCGv_i64 t3
= tcg_temp_new_i64();
1343 tcg_gen_ext_i32_i64(t1
, r1
);
1344 tcg_gen_ext_i32_i64(t2
, r2
);
1345 tcg_gen_ext_i32_i64(t3
, r3
);
1347 tcg_gen_mul_i64(t1
, t1
, t3
);
1348 tcg_gen_sub_i64(t1
, t2
, t1
);
1350 tcg_gen_trunc_i64_i32(ret
, t1
);
1353 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
1354 /* result < -0x80000000 */
1355 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
1356 tcg_gen_or_i64(t2
, t2
, t3
);
1357 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
1358 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1361 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1362 /* Calc AV/SAV bits */
1363 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1364 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1366 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1368 tcg_temp_free_i64(t1
);
1369 tcg_temp_free_i64(t2
);
1370 tcg_temp_free_i64(t3
);
1373 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1375 TCGv temp
= tcg_const_i32(con
);
1376 gen_msub32_d(ret
, r1
, r2
, temp
);
1377 tcg_temp_free(temp
);
1381 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1384 TCGv t1
= tcg_temp_new();
1385 TCGv t2
= tcg_temp_new();
1386 TCGv t3
= tcg_temp_new();
1387 TCGv t4
= tcg_temp_new();
1389 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
1390 /* only the sub can overflow */
1391 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
1393 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
1394 tcg_gen_xor_tl(t1
, r2_high
, t2
);
1395 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
1397 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1398 /* Calc AV/SAV bits */
1399 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
1400 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
1402 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1403 /* write back the result */
1404 tcg_gen_mov_tl(ret_low
, t3
);
1405 tcg_gen_mov_tl(ret_high
, t4
);
1414 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1417 TCGv temp
= tcg_const_i32(con
);
1418 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1419 tcg_temp_free(temp
);
1423 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1426 TCGv_i64 t1
= tcg_temp_new_i64();
1427 TCGv_i64 t2
= tcg_temp_new_i64();
1428 TCGv_i64 t3
= tcg_temp_new_i64();
1430 tcg_gen_extu_i32_i64(t1
, r1
);
1431 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
1432 tcg_gen_extu_i32_i64(t3
, r3
);
1434 tcg_gen_mul_i64(t1
, t1
, t3
);
1435 tcg_gen_sub_i64(t3
, t2
, t1
);
1436 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
1437 /* calc V bit, only the sub can overflow, if t1 > t2 */
1438 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
1439 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
1440 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1442 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1443 /* Calc AV/SAV bits */
1444 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1445 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1447 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1449 tcg_temp_free_i64(t1
);
1450 tcg_temp_free_i64(t2
);
1451 tcg_temp_free_i64(t3
);
1455 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1458 TCGv temp
= tcg_const_i32(con
);
1459 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1460 tcg_temp_free(temp
);
1463 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1465 TCGv temp
= tcg_const_i32(r2
);
1466 gen_add_d(ret
, r1
, temp
);
1467 tcg_temp_free(temp
);
1469 /* calculate the carry bit too */
1470 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1472 TCGv t0
= tcg_temp_new_i32();
1473 TCGv result
= tcg_temp_new_i32();
1475 tcg_gen_movi_tl(t0
, 0);
1476 /* Addition and set C/V/SV bits */
1477 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
1479 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1480 tcg_gen_xor_tl(t0
, r1
, r2
);
1481 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1483 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1484 /* Calc AV/SAV bits */
1485 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1486 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1488 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1489 /* write back result */
1490 tcg_gen_mov_tl(ret
, result
);
1492 tcg_temp_free(result
);
1496 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1498 TCGv temp
= tcg_const_i32(con
);
1499 gen_add_CC(ret
, r1
, temp
);
1500 tcg_temp_free(temp
);
1503 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1505 TCGv carry
= tcg_temp_new_i32();
1506 TCGv t0
= tcg_temp_new_i32();
1507 TCGv result
= tcg_temp_new_i32();
1509 tcg_gen_movi_tl(t0
, 0);
1510 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1511 /* Addition, carry and set C/V/SV bits */
1512 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1513 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1515 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1516 tcg_gen_xor_tl(t0
, r1
, r2
);
1517 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1519 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1520 /* Calc AV/SAV bits */
1521 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1522 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1524 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1525 /* write back result */
1526 tcg_gen_mov_tl(ret
, result
);
1528 tcg_temp_free(result
);
1530 tcg_temp_free(carry
);
1533 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1535 TCGv temp
= tcg_const_i32(con
);
1536 gen_addc_CC(ret
, r1
, temp
);
1537 tcg_temp_free(temp
);
1540 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1543 TCGv temp
= tcg_temp_new();
1544 TCGv temp2
= tcg_temp_new();
1545 TCGv result
= tcg_temp_new();
1546 TCGv mask
= tcg_temp_new();
1547 TCGv t0
= tcg_const_i32(0);
1549 /* create mask for sticky bits */
1550 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1551 tcg_gen_shli_tl(mask
, mask
, 31);
1553 tcg_gen_add_tl(result
, r1
, r2
);
1555 tcg_gen_xor_tl(temp
, result
, r1
);
1556 tcg_gen_xor_tl(temp2
, r1
, r2
);
1557 tcg_gen_andc_tl(temp
, temp
, temp2
);
1558 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1560 tcg_gen_and_tl(temp
, temp
, mask
);
1561 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1563 tcg_gen_add_tl(temp
, result
, result
);
1564 tcg_gen_xor_tl(temp
, temp
, result
);
1565 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1567 tcg_gen_and_tl(temp
, temp
, mask
);
1568 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1569 /* write back result */
1570 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1573 tcg_temp_free(temp
);
1574 tcg_temp_free(temp2
);
1575 tcg_temp_free(result
);
1576 tcg_temp_free(mask
);
1579 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1582 TCGv temp
= tcg_const_i32(r2
);
1583 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1584 tcg_temp_free(temp
);
1587 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1589 TCGv temp
= tcg_temp_new_i32();
1590 TCGv result
= tcg_temp_new_i32();
1592 tcg_gen_sub_tl(result
, r1
, r2
);
1594 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1595 tcg_gen_xor_tl(temp
, r1
, r2
);
1596 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1598 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1600 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1601 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1603 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1604 /* write back result */
1605 tcg_gen_mov_tl(ret
, result
);
1607 tcg_temp_free(temp
);
1608 tcg_temp_free(result
);
1612 gen_sub64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
1614 TCGv temp
= tcg_temp_new();
1615 TCGv_i64 t0
= tcg_temp_new_i64();
1616 TCGv_i64 t1
= tcg_temp_new_i64();
1617 TCGv_i64 result
= tcg_temp_new_i64();
1619 tcg_gen_sub_i64(result
, r1
, r2
);
1621 tcg_gen_xor_i64(t1
, result
, r1
);
1622 tcg_gen_xor_i64(t0
, r1
, r2
);
1623 tcg_gen_and_i64(t1
, t1
, t0
);
1624 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t1
, 32);
1626 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1627 /* calc AV/SAV bits */
1628 tcg_gen_trunc_shr_i64_i32(temp
, result
, 32);
1629 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
1630 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
1632 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1633 /* write back result */
1634 tcg_gen_mov_i64(ret
, result
);
1636 tcg_temp_free(temp
);
1637 tcg_temp_free_i64(result
);
1638 tcg_temp_free_i64(t0
);
1639 tcg_temp_free_i64(t1
);
1642 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1644 TCGv result
= tcg_temp_new();
1645 TCGv temp
= tcg_temp_new();
1647 tcg_gen_sub_tl(result
, r1
, r2
);
1649 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1651 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1652 tcg_gen_xor_tl(temp
, r1
, r2
);
1653 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1655 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1657 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1658 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1660 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1661 /* write back result */
1662 tcg_gen_mov_tl(ret
, result
);
1664 tcg_temp_free(result
);
1665 tcg_temp_free(temp
);
1668 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1670 TCGv temp
= tcg_temp_new();
1671 tcg_gen_not_tl(temp
, r2
);
1672 gen_addc_CC(ret
, r1
, temp
);
1673 tcg_temp_free(temp
);
1676 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1679 TCGv temp
= tcg_temp_new();
1680 TCGv temp2
= tcg_temp_new();
1681 TCGv result
= tcg_temp_new();
1682 TCGv mask
= tcg_temp_new();
1683 TCGv t0
= tcg_const_i32(0);
1685 /* create mask for sticky bits */
1686 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1687 tcg_gen_shli_tl(mask
, mask
, 31);
1689 tcg_gen_sub_tl(result
, r1
, r2
);
1691 tcg_gen_xor_tl(temp
, result
, r1
);
1692 tcg_gen_xor_tl(temp2
, r1
, r2
);
1693 tcg_gen_and_tl(temp
, temp
, temp2
);
1694 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1696 tcg_gen_and_tl(temp
, temp
, mask
);
1697 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1699 tcg_gen_add_tl(temp
, result
, result
);
1700 tcg_gen_xor_tl(temp
, temp
, result
);
1701 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1703 tcg_gen_and_tl(temp
, temp
, mask
);
1704 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1705 /* write back result */
1706 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1709 tcg_temp_free(temp
);
1710 tcg_temp_free(temp2
);
1711 tcg_temp_free(result
);
1712 tcg_temp_free(mask
);
1716 gen_msub_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1717 TCGv r3
, uint32_t n
, uint32_t mode
)
1719 TCGv temp
= tcg_const_i32(n
);
1720 TCGv temp2
= tcg_temp_new();
1721 TCGv_i64 temp64
= tcg_temp_new_i64();
1724 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1727 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1730 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1733 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1736 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1737 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
1738 tcg_gen_sub_tl
, tcg_gen_sub_tl
);
1739 tcg_temp_free(temp
);
1740 tcg_temp_free(temp2
);
1741 tcg_temp_free_i64(temp64
);
1745 gen_msubs_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1746 TCGv r3
, uint32_t n
, uint32_t mode
)
1748 TCGv temp
= tcg_const_i32(n
);
1749 TCGv temp2
= tcg_temp_new();
1750 TCGv temp3
= tcg_temp_new();
1751 TCGv_i64 temp64
= tcg_temp_new_i64();
1755 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1758 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1761 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1764 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1767 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
1768 gen_subs(ret_low
, r1_low
, temp
);
1769 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
1770 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
1771 gen_subs(ret_high
, r1_high
, temp2
);
1772 /* combine v bits */
1773 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1774 /* combine av bits */
1775 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
1777 tcg_temp_free(temp
);
1778 tcg_temp_free(temp2
);
1779 tcg_temp_free(temp3
);
1780 tcg_temp_free_i64(temp64
);
1784 gen_msubm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1785 TCGv r3
, uint32_t n
, uint32_t mode
)
1787 TCGv temp
= tcg_const_i32(n
);
1788 TCGv_i64 temp64
= tcg_temp_new_i64();
1789 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1790 TCGv_i64 temp64_3
= tcg_temp_new_i64();
1793 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1796 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1799 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1802 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1805 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1806 gen_sub64_d(temp64_3
, temp64_2
, temp64
);
1807 /* write back result */
1808 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
1810 tcg_temp_free(temp
);
1811 tcg_temp_free_i64(temp64
);
1812 tcg_temp_free_i64(temp64_2
);
1813 tcg_temp_free_i64(temp64_3
);
1817 gen_msubms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
1818 TCGv r3
, uint32_t n
, uint32_t mode
)
1820 TCGv temp
= tcg_const_i32(n
);
1821 TCGv_i64 temp64
= tcg_temp_new_i64();
1822 TCGv_i64 temp64_2
= tcg_temp_new_i64();
1825 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
1828 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
1831 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
1834 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
1837 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
1838 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
1839 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1841 tcg_temp_free(temp
);
1842 tcg_temp_free_i64(temp64
);
1843 tcg_temp_free_i64(temp64_2
);
1847 gen_msubr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
1850 TCGv temp
= tcg_const_i32(n
);
1851 TCGv_i64 temp64
= tcg_temp_new_i64();
1854 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1857 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1860 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1863 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1866 gen_helper_subr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1868 tcg_temp_free(temp
);
1869 tcg_temp_free_i64(temp64
);
1873 gen_msubr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1875 TCGv temp
= tcg_temp_new();
1876 TCGv temp2
= tcg_temp_new();
1878 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1879 tcg_gen_shli_tl(temp
, r1
, 16);
1880 gen_msubr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1882 tcg_temp_free(temp
);
1883 tcg_temp_free(temp2
);
1887 gen_msubr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
1888 uint32_t n
, uint32_t mode
)
1890 TCGv temp
= tcg_const_i32(n
);
1891 TCGv_i64 temp64
= tcg_temp_new_i64();
1894 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
1897 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
1900 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
1903 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
1906 gen_helper_subr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
1908 tcg_temp_free(temp
);
1909 tcg_temp_free_i64(temp64
);
1913 gen_msubr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
1915 TCGv temp
= tcg_temp_new();
1916 TCGv temp2
= tcg_temp_new();
1918 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
1919 tcg_gen_shli_tl(temp
, r1
, 16);
1920 gen_msubr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
1922 tcg_temp_free(temp
);
1923 tcg_temp_free(temp2
);
1927 gen_msubr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1929 TCGv temp
= tcg_const_i32(n
);
1930 gen_helper_msubr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1931 tcg_temp_free(temp
);
1935 gen_msubrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
1937 TCGv temp
= tcg_const_i32(n
);
1938 gen_helper_msubr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
1939 tcg_temp_free(temp
);
1943 gen_msub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1944 uint32_t up_shift
, CPUTriCoreState
*env
)
1946 TCGv temp
= tcg_temp_new();
1947 TCGv temp2
= tcg_temp_new();
1948 TCGv temp3
= tcg_temp_new();
1949 TCGv_i64 t1
= tcg_temp_new_i64();
1950 TCGv_i64 t2
= tcg_temp_new_i64();
1951 TCGv_i64 t3
= tcg_temp_new_i64();
1952 TCGv_i64 t4
= tcg_temp_new_i64();
1954 tcg_gen_ext_i32_i64(t2
, arg2
);
1955 tcg_gen_ext_i32_i64(t3
, arg3
);
1957 tcg_gen_mul_i64(t2
, t2
, t3
);
1959 tcg_gen_ext_i32_i64(t1
, arg1
);
1960 /* if we shift part of the fraction out, we need to round up */
1961 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
1962 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
1963 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1964 tcg_gen_add_i64(t2
, t2
, t4
);
1966 tcg_gen_sub_i64(t3
, t1
, t2
);
1967 tcg_gen_trunc_i64_i32(temp3
, t3
);
1969 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
1970 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
1971 tcg_gen_or_i64(t1
, t1
, t2
);
1972 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
1973 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1974 /* We produce an overflow on the host if the mul before was
1975 (0x80000000 * 0x80000000) << 1). If this is the
1976 case, we negate the ovf. */
1978 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1979 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1980 tcg_gen_and_tl(temp
, temp
, temp2
);
1981 tcg_gen_shli_tl(temp
, temp
, 31);
1982 /* negate v bit, if special condition */
1983 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1986 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1987 /* Calc AV/SAV bits */
1988 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
1989 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
1991 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1992 /* write back result */
1993 tcg_gen_mov_tl(ret
, temp3
);
1995 tcg_temp_free(temp
);
1996 tcg_temp_free(temp2
);
1997 tcg_temp_free(temp3
);
1998 tcg_temp_free_i64(t1
);
1999 tcg_temp_free_i64(t2
);
2000 tcg_temp_free_i64(t3
);
2001 tcg_temp_free_i64(t4
);
2005 gen_m16sub32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
2007 TCGv temp
= tcg_temp_new();
2008 TCGv temp2
= tcg_temp_new();
2010 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2011 } else { /* n is expected to be 1 */
2012 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2013 tcg_gen_shli_tl(temp
, temp
, 1);
2014 /* catch special case r1 = r2 = 0x8000 */
2015 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2016 tcg_gen_sub_tl(temp
, temp
, temp2
);
2018 gen_sub_d(ret
, arg1
, temp
);
2020 tcg_temp_free(temp
);
2021 tcg_temp_free(temp2
);
2025 gen_m16subs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
2027 TCGv temp
= tcg_temp_new();
2028 TCGv temp2
= tcg_temp_new();
2030 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2031 } else { /* n is expected to be 1 */
2032 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2033 tcg_gen_shli_tl(temp
, temp
, 1);
2034 /* catch special case r1 = r2 = 0x8000 */
2035 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2036 tcg_gen_sub_tl(temp
, temp
, temp2
);
2038 gen_subs(ret
, arg1
, temp
);
2040 tcg_temp_free(temp
);
2041 tcg_temp_free(temp2
);
2045 gen_m16sub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2046 TCGv arg3
, uint32_t n
)
2048 TCGv temp
= tcg_temp_new();
2049 TCGv temp2
= tcg_temp_new();
2050 TCGv_i64 t1
= tcg_temp_new_i64();
2051 TCGv_i64 t2
= tcg_temp_new_i64();
2052 TCGv_i64 t3
= tcg_temp_new_i64();
2055 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2056 } else { /* n is expected to be 1 */
2057 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2058 tcg_gen_shli_tl(temp
, temp
, 1);
2059 /* catch special case r1 = r2 = 0x8000 */
2060 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2061 tcg_gen_sub_tl(temp
, temp
, temp2
);
2063 tcg_gen_ext_i32_i64(t2
, temp
);
2064 tcg_gen_shli_i64(t2
, t2
, 16);
2065 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2066 gen_sub64_d(t3
, t1
, t2
);
2067 /* write back result */
2068 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
2070 tcg_temp_free_i64(t1
);
2071 tcg_temp_free_i64(t2
);
2072 tcg_temp_free_i64(t3
);
2073 tcg_temp_free(temp
);
2074 tcg_temp_free(temp2
);
2078 gen_m16subs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2079 TCGv arg3
, uint32_t n
)
2081 TCGv temp
= tcg_temp_new();
2082 TCGv temp2
= tcg_temp_new();
2083 TCGv_i64 t1
= tcg_temp_new_i64();
2084 TCGv_i64 t2
= tcg_temp_new_i64();
2087 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2088 } else { /* n is expected to be 1 */
2089 tcg_gen_mul_tl(temp
, arg2
, arg3
);
2090 tcg_gen_shli_tl(temp
, temp
, 1);
2091 /* catch special case r1 = r2 = 0x8000 */
2092 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
2093 tcg_gen_sub_tl(temp
, temp
, temp2
);
2095 tcg_gen_ext_i32_i64(t2
, temp
);
2096 tcg_gen_shli_i64(t2
, t2
, 16);
2097 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2099 gen_helper_sub64_ssov(t1
, cpu_env
, t1
, t2
);
2100 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
2102 tcg_temp_free(temp
);
2103 tcg_temp_free(temp2
);
2104 tcg_temp_free_i64(t1
);
2105 tcg_temp_free_i64(t2
);
2109 gen_msub64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2110 TCGv arg3
, uint32_t n
, CPUTriCoreState
*env
)
2112 TCGv_i64 t1
= tcg_temp_new_i64();
2113 TCGv_i64 t2
= tcg_temp_new_i64();
2114 TCGv_i64 t3
= tcg_temp_new_i64();
2115 TCGv_i64 t4
= tcg_temp_new_i64();
2118 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
2119 tcg_gen_ext_i32_i64(t2
, arg2
);
2120 tcg_gen_ext_i32_i64(t3
, arg3
);
2122 tcg_gen_mul_i64(t2
, t2
, t3
);
2124 tcg_gen_shli_i64(t2
, t2
, 1);
2126 tcg_gen_sub_i64(t4
, t1
, t2
);
2128 tcg_gen_xor_i64(t3
, t4
, t1
);
2129 tcg_gen_xor_i64(t2
, t1
, t2
);
2130 tcg_gen_and_i64(t3
, t3
, t2
);
2131 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t3
, 32);
2132 /* We produce an overflow on the host if the mul before was
2133 (0x80000000 * 0x80000000) << 1). If this is the
2134 case, we negate the ovf. */
2136 temp
= tcg_temp_new();
2137 temp2
= tcg_temp_new();
2138 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
2139 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
2140 tcg_gen_and_tl(temp
, temp
, temp2
);
2141 tcg_gen_shli_tl(temp
, temp
, 31);
2142 /* negate v bit, if special condition */
2143 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2145 tcg_temp_free(temp
);
2146 tcg_temp_free(temp2
);
2148 /* write back result */
2149 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
2151 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2152 /* Calc AV/SAV bits */
2153 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2154 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2156 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2158 tcg_temp_free_i64(t1
);
2159 tcg_temp_free_i64(t2
);
2160 tcg_temp_free_i64(t3
);
2161 tcg_temp_free_i64(t4
);
2165 gen_msubs32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
2168 TCGv_i64 t1
= tcg_temp_new_i64();
2169 TCGv_i64 t2
= tcg_temp_new_i64();
2170 TCGv_i64 t3
= tcg_temp_new_i64();
2171 TCGv_i64 t4
= tcg_temp_new_i64();
2173 tcg_gen_ext_i32_i64(t1
, arg1
);
2174 tcg_gen_ext_i32_i64(t2
, arg2
);
2175 tcg_gen_ext_i32_i64(t3
, arg3
);
2177 tcg_gen_mul_i64(t2
, t2
, t3
);
2178 /* if we shift part of the fraction out, we need to round up */
2179 tcg_gen_andi_i64(t4
, t2
, (1ll << (up_shift
- n
)) - 1);
2180 tcg_gen_setcondi_i64(TCG_COND_NE
, t4
, t4
, 0);
2181 tcg_gen_sari_i64(t3
, t2
, up_shift
- n
);
2182 tcg_gen_add_i64(t3
, t3
, t4
);
2184 gen_helper_msub32_q_sub_ssov(ret
, cpu_env
, t1
, t3
);
2186 tcg_temp_free_i64(t1
);
2187 tcg_temp_free_i64(t2
);
2188 tcg_temp_free_i64(t3
);
2189 tcg_temp_free_i64(t4
);
2193 gen_msubs64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
2194 TCGv arg3
, uint32_t n
)
2196 TCGv_i64 r1
= tcg_temp_new_i64();
2197 TCGv temp
= tcg_const_i32(n
);
2199 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
2200 gen_helper_msub64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
2201 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
2203 tcg_temp_free_i64(r1
);
2204 tcg_temp_free(temp
);
2208 gen_msubad_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2209 TCGv r3
, uint32_t n
, uint32_t mode
)
2211 TCGv temp
= tcg_const_i32(n
);
2212 TCGv temp2
= tcg_temp_new();
2213 TCGv_i64 temp64
= tcg_temp_new_i64();
2216 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2219 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2222 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2225 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2228 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2229 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
2230 tcg_gen_add_tl
, tcg_gen_sub_tl
);
2231 tcg_temp_free(temp
);
2232 tcg_temp_free(temp2
);
2233 tcg_temp_free_i64(temp64
);
2237 gen_msubadm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2238 TCGv r3
, uint32_t n
, uint32_t mode
)
2240 TCGv temp
= tcg_const_i32(n
);
2241 TCGv_i64 temp64
= tcg_temp_new_i64();
2242 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2243 TCGv_i64 temp64_3
= tcg_temp_new_i64();
2246 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2249 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2252 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2255 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2258 tcg_gen_concat_i32_i64(temp64_3
, r1_low
, r1_high
);
2259 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2260 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2261 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2262 tcg_gen_shli_i64(temp64
, temp64
, 16);
2264 gen_sub64_d(temp64_2
, temp64_3
, temp64
);
2265 /* write back result */
2266 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_2
);
2268 tcg_temp_free(temp
);
2269 tcg_temp_free_i64(temp64
);
2270 tcg_temp_free_i64(temp64_2
);
2271 tcg_temp_free_i64(temp64_3
);
2275 gen_msubadr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2277 TCGv temp
= tcg_const_i32(n
);
2278 TCGv temp2
= tcg_temp_new();
2279 TCGv_i64 temp64
= tcg_temp_new_i64();
2282 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2285 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2288 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2291 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2294 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2295 tcg_gen_shli_tl(temp
, r1
, 16);
2296 gen_helper_subadr_h(ret
, cpu_env
, temp64
, temp
, temp2
);
2298 tcg_temp_free(temp
);
2299 tcg_temp_free(temp2
);
2300 tcg_temp_free_i64(temp64
);
2304 gen_msubads_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2305 TCGv r3
, uint32_t n
, uint32_t mode
)
2307 TCGv temp
= tcg_const_i32(n
);
2308 TCGv temp2
= tcg_temp_new();
2309 TCGv temp3
= tcg_temp_new();
2310 TCGv_i64 temp64
= tcg_temp_new_i64();
2314 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2317 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2320 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2323 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2326 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
2327 gen_adds(ret_low
, r1_low
, temp
);
2328 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
2329 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
2330 gen_subs(ret_high
, r1_high
, temp2
);
2331 /* combine v bits */
2332 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2333 /* combine av bits */
2334 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
2336 tcg_temp_free(temp
);
2337 tcg_temp_free(temp2
);
2338 tcg_temp_free(temp3
);
2339 tcg_temp_free_i64(temp64
);
2343 gen_msubadms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
2344 TCGv r3
, uint32_t n
, uint32_t mode
)
2346 TCGv temp
= tcg_const_i32(n
);
2347 TCGv_i64 temp64
= tcg_temp_new_i64();
2348 TCGv_i64 temp64_2
= tcg_temp_new_i64();
2352 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2355 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2358 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2361 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2364 tcg_gen_sari_i64(temp64_2
, temp64
, 32); /* high */
2365 tcg_gen_ext32s_i64(temp64
, temp64
); /* low */
2366 tcg_gen_sub_i64(temp64
, temp64_2
, temp64
);
2367 tcg_gen_shli_i64(temp64
, temp64
, 16);
2368 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
2370 gen_helper_sub64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
2371 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2373 tcg_temp_free(temp
);
2374 tcg_temp_free_i64(temp64
);
2375 tcg_temp_free_i64(temp64_2
);
2379 gen_msubadr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
2381 TCGv temp
= tcg_const_i32(n
);
2382 TCGv temp2
= tcg_temp_new();
2383 TCGv_i64 temp64
= tcg_temp_new_i64();
2386 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
2389 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
2392 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
2395 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
2398 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
2399 tcg_gen_shli_tl(temp
, r1
, 16);
2400 gen_helper_subadr_h_ssov(ret
, cpu_env
, temp64
, temp
, temp2
);
2402 tcg_temp_free(temp
);
2403 tcg_temp_free(temp2
);
2404 tcg_temp_free_i64(temp64
);
2407 static inline void gen_abs(TCGv ret
, TCGv r1
)
2409 TCGv temp
= tcg_temp_new();
2410 TCGv t0
= tcg_const_i32(0);
2412 tcg_gen_neg_tl(temp
, r1
);
2413 tcg_gen_movcond_tl(TCG_COND_GE
, ret
, r1
, t0
, r1
, temp
);
2414 /* overflow can only happen, if r1 = 0x80000000 */
2415 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
2416 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2418 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2420 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2421 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2423 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2425 tcg_temp_free(temp
);
2429 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
2431 TCGv temp
= tcg_temp_new_i32();
2432 TCGv result
= tcg_temp_new_i32();
2434 tcg_gen_sub_tl(result
, r1
, r2
);
2435 tcg_gen_sub_tl(temp
, r2
, r1
);
2436 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
2439 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
2440 tcg_gen_xor_tl(temp
, result
, r2
);
2441 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
2442 tcg_gen_xor_tl(temp
, r1
, r2
);
2443 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
2445 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2447 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
2448 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
2450 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2451 /* write back result */
2452 tcg_gen_mov_tl(ret
, result
);
2454 tcg_temp_free(temp
);
2455 tcg_temp_free(result
);
2458 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
2460 TCGv temp
= tcg_const_i32(con
);
2461 gen_absdif(ret
, r1
, temp
);
2462 tcg_temp_free(temp
);
2465 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
2467 TCGv temp
= tcg_const_i32(con
);
2468 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
2469 tcg_temp_free(temp
);
2472 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
2474 TCGv high
= tcg_temp_new();
2475 TCGv low
= tcg_temp_new();
2477 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
2478 tcg_gen_mov_tl(ret
, low
);
2480 tcg_gen_sari_tl(low
, low
, 31);
2481 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
2482 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2484 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2486 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2487 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2489 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2491 tcg_temp_free(high
);
2495 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
2497 TCGv temp
= tcg_const_i32(con
);
2498 gen_mul_i32s(ret
, r1
, temp
);
2499 tcg_temp_free(temp
);
2502 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2504 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
2506 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2508 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2510 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2511 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2513 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2516 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2519 TCGv temp
= tcg_const_i32(con
);
2520 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
2521 tcg_temp_free(temp
);
2524 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
2526 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
2528 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2530 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2532 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
2533 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
2535 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2538 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
2541 TCGv temp
= tcg_const_i32(con
);
2542 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
2543 tcg_temp_free(temp
);
2546 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
2548 TCGv temp
= tcg_const_i32(con
);
2549 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
2550 tcg_temp_free(temp
);
2553 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
2555 TCGv temp
= tcg_const_i32(con
);
2556 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
2557 tcg_temp_free(temp
);
2559 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
2560 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2562 TCGv temp
= tcg_const_i32(con
);
2563 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2564 tcg_temp_free(temp
);
2567 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2569 TCGv temp
= tcg_const_i32(con
);
2570 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2571 tcg_temp_free(temp
);
2575 gen_mul_q(TCGv rl
, TCGv rh
, TCGv arg1
, TCGv arg2
, uint32_t n
, uint32_t up_shift
)
2577 TCGv temp
= tcg_temp_new();
2578 TCGv_i64 temp_64
= tcg_temp_new_i64();
2579 TCGv_i64 temp2_64
= tcg_temp_new_i64();
2582 if (up_shift
== 32) {
2583 tcg_gen_muls2_tl(rh
, rl
, arg1
, arg2
);
2584 } else if (up_shift
== 16) {
2585 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2586 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2588 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2589 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
);
2590 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2592 tcg_gen_muls2_tl(rl
, rh
, arg1
, arg2
);
2595 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2596 } else { /* n is expected to be 1 */
2597 tcg_gen_ext_i32_i64(temp_64
, arg1
);
2598 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
2600 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
2602 if (up_shift
== 0) {
2603 tcg_gen_shli_i64(temp_64
, temp_64
, 1);
2605 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
- 1);
2607 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
2608 /* overflow only occurs if r1 = r2 = 0x8000 */
2609 if (up_shift
== 0) {/* result is 64 bit */
2610 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rh
,
2612 } else { /* result is 32 bit */
2613 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rl
,
2616 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2617 /* calc sv overflow bit */
2618 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2620 /* calc av overflow bit */
2621 if (up_shift
== 0) {
2622 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
2623 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
2625 tcg_gen_add_tl(cpu_PSW_AV
, rl
, rl
);
2626 tcg_gen_xor_tl(cpu_PSW_AV
, rl
, cpu_PSW_AV
);
2628 /* calc sav overflow bit */
2629 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2630 tcg_temp_free(temp
);
2631 tcg_temp_free_i64(temp_64
);
2632 tcg_temp_free_i64(temp2_64
);
2636 gen_mul_q_16(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2638 TCGv temp
= tcg_temp_new();
2640 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2641 } else { /* n is expected to be 1 */
2642 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2643 tcg_gen_shli_tl(ret
, ret
, 1);
2644 /* catch special case r1 = r2 = 0x8000 */
2645 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80000000);
2646 tcg_gen_sub_tl(ret
, ret
, temp
);
2649 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2650 /* calc av overflow bit */
2651 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2652 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2653 /* calc sav overflow bit */
2654 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2656 tcg_temp_free(temp
);
2659 static void gen_mulr_q(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
2661 TCGv temp
= tcg_temp_new();
2663 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2664 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2666 tcg_gen_mul_tl(ret
, arg1
, arg2
);
2667 tcg_gen_shli_tl(ret
, ret
, 1);
2668 tcg_gen_addi_tl(ret
, ret
, 0x8000);
2669 /* catch special case r1 = r2 = 0x8000 */
2670 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80008000);
2671 tcg_gen_muli_tl(temp
, temp
, 0x8001);
2672 tcg_gen_sub_tl(ret
, ret
, temp
);
2675 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2676 /* calc av overflow bit */
2677 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2678 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2679 /* calc sav overflow bit */
2680 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2681 /* cut halfword off */
2682 tcg_gen_andi_tl(ret
, ret
, 0xffff0000);
2684 tcg_temp_free(temp
);
2688 gen_madds_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2691 TCGv_i64 temp64
= tcg_temp_new_i64();
2692 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2693 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2694 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2695 tcg_temp_free_i64(temp64
);
2699 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2702 TCGv temp
= tcg_const_i32(con
);
2703 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2704 tcg_temp_free(temp
);
2708 gen_maddsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2711 TCGv_i64 temp64
= tcg_temp_new_i64();
2712 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2713 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2714 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2715 tcg_temp_free_i64(temp64
);
2719 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2722 TCGv temp
= tcg_const_i32(con
);
2723 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2724 tcg_temp_free(temp
);
2727 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2729 TCGv temp
= tcg_const_i32(con
);
2730 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
2731 tcg_temp_free(temp
);
2734 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
2736 TCGv temp
= tcg_const_i32(con
);
2737 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
2738 tcg_temp_free(temp
);
2742 gen_msubs_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2745 TCGv_i64 temp64
= tcg_temp_new_i64();
2746 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2747 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
2748 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2749 tcg_temp_free_i64(temp64
);
2753 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2756 TCGv temp
= tcg_const_i32(con
);
2757 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2758 tcg_temp_free(temp
);
2762 gen_msubsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2765 TCGv_i64 temp64
= tcg_temp_new_i64();
2766 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
2767 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
2768 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
2769 tcg_temp_free_i64(temp64
);
2773 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
2776 TCGv temp
= tcg_const_i32(con
);
2777 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
2778 tcg_temp_free(temp
);
2781 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
2783 TCGv sat_neg
= tcg_const_i32(low
);
2784 TCGv temp
= tcg_const_i32(up
);
2786 /* sat_neg = (arg < low ) ? low : arg; */
2787 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
2789 /* ret = (sat_neg > up ) ? up : sat_neg; */
2790 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
2792 tcg_temp_free(sat_neg
);
2793 tcg_temp_free(temp
);
2796 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
2798 TCGv temp
= tcg_const_i32(up
);
2799 /* sat_neg = (arg > up ) ? up : arg; */
2800 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
2801 tcg_temp_free(temp
);
2804 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2806 if (shift_count
== -32) {
2807 tcg_gen_movi_tl(ret
, 0);
2808 } else if (shift_count
>= 0) {
2809 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2811 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
2815 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
2817 TCGv temp_low
, temp_high
;
2819 if (shiftcount
== -16) {
2820 tcg_gen_movi_tl(ret
, 0);
2822 temp_high
= tcg_temp_new();
2823 temp_low
= tcg_temp_new();
2825 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
2826 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
2827 gen_shi(temp_low
, temp_low
, shiftcount
);
2828 gen_shi(ret
, temp_high
, shiftcount
);
2829 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
2831 tcg_temp_free(temp_low
);
2832 tcg_temp_free(temp_high
);
2836 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
2838 uint32_t msk
, msk_start
;
2839 TCGv temp
= tcg_temp_new();
2840 TCGv temp2
= tcg_temp_new();
2841 TCGv t_0
= tcg_const_i32(0);
2843 if (shift_count
== 0) {
2844 /* Clear PSW.C and PSW.V */
2845 tcg_gen_movi_tl(cpu_PSW_C
, 0);
2846 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
2847 tcg_gen_mov_tl(ret
, r1
);
2848 } else if (shift_count
== -32) {
2850 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
2851 /* fill ret completly with sign bit */
2852 tcg_gen_sari_tl(ret
, r1
, 31);
2854 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2855 } else if (shift_count
> 0) {
2856 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
2857 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
2860 msk_start
= 32 - shift_count
;
2861 msk
= ((1 << shift_count
) - 1) << msk_start
;
2862 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2863 /* calc v/sv bits */
2864 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
2865 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
2866 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
2867 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2869 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
2871 tcg_gen_shli_tl(ret
, r1
, shift_count
);
2873 tcg_temp_free(t_max
);
2874 tcg_temp_free(t_min
);
2877 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2879 msk
= (1 << -shift_count
) - 1;
2880 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
2882 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2884 /* calc av overflow bit */
2885 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
2886 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
2887 /* calc sav overflow bit */
2888 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2890 tcg_temp_free(temp
);
2891 tcg_temp_free(temp2
);
2895 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
2897 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
2900 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
2902 TCGv temp
= tcg_const_i32(con
);
2903 gen_shas(ret
, r1
, temp
);
2904 tcg_temp_free(temp
);
2907 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
2911 if (shift_count
== 0) {
2912 tcg_gen_mov_tl(ret
, r1
);
2913 } else if (shift_count
> 0) {
2914 low
= tcg_temp_new();
2915 high
= tcg_temp_new();
2917 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
2918 tcg_gen_shli_tl(low
, r1
, shift_count
);
2919 tcg_gen_shli_tl(ret
, high
, shift_count
);
2920 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2923 tcg_temp_free(high
);
2925 low
= tcg_temp_new();
2926 high
= tcg_temp_new();
2928 tcg_gen_ext16s_tl(low
, r1
);
2929 tcg_gen_sari_tl(low
, low
, -shift_count
);
2930 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
2931 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
2934 tcg_temp_free(high
);
2939 /* ret = {ret[30:0], (r1 cond r2)}; */
2940 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2942 TCGv temp
= tcg_temp_new();
2943 TCGv temp2
= tcg_temp_new();
2945 tcg_gen_shli_tl(temp
, ret
, 1);
2946 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
2947 tcg_gen_or_tl(ret
, temp
, temp2
);
2949 tcg_temp_free(temp
);
2950 tcg_temp_free(temp2
);
2953 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
2955 TCGv temp
= tcg_const_i32(con
);
2956 gen_sh_cond(cond
, ret
, r1
, temp
);
2957 tcg_temp_free(temp
);
2960 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
2962 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
2965 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
2967 TCGv temp
= tcg_const_i32(con
);
2968 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
2969 tcg_temp_free(temp
);
2972 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
2974 TCGv temp
= tcg_const_i32(con
);
2975 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
2976 tcg_temp_free(temp
);
2979 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
2981 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
2984 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
2986 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
2989 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
2991 void(*op1
)(TCGv
, TCGv
, TCGv
),
2992 void(*op2
)(TCGv
, TCGv
, TCGv
))
2996 temp1
= tcg_temp_new();
2997 temp2
= tcg_temp_new();
2999 tcg_gen_shri_tl(temp2
, r2
, pos2
);
3000 tcg_gen_shri_tl(temp1
, r1
, pos1
);
3002 (*op1
)(temp1
, temp1
, temp2
);
3003 (*op2
)(temp1
, ret
, temp1
);
3005 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
3007 tcg_temp_free(temp1
);
3008 tcg_temp_free(temp2
);
3011 /* ret = r1[pos1] op1 r2[pos2]; */
3012 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
3014 void(*op1
)(TCGv
, TCGv
, TCGv
))
3018 temp1
= tcg_temp_new();
3019 temp2
= tcg_temp_new();
3021 tcg_gen_shri_tl(temp2
, r2
, pos2
);
3022 tcg_gen_shri_tl(temp1
, r1
, pos1
);
3024 (*op1
)(ret
, temp1
, temp2
);
3026 tcg_gen_andi_tl(ret
, ret
, 0x1);
3028 tcg_temp_free(temp1
);
3029 tcg_temp_free(temp2
);
3032 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
3033 void(*op
)(TCGv
, TCGv
, TCGv
))
3035 TCGv temp
= tcg_temp_new();
3036 TCGv temp2
= tcg_temp_new();
3037 /* temp = (arg1 cond arg2 )*/
3038 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
3040 tcg_gen_andi_tl(temp2
, ret
, 0x1);
3041 /* temp = temp insn temp2 */
3042 (*op
)(temp
, temp
, temp2
);
3043 /* ret = {ret[31:1], temp} */
3044 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
3046 tcg_temp_free(temp
);
3047 tcg_temp_free(temp2
);
3051 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
3052 void(*op
)(TCGv
, TCGv
, TCGv
))
3054 TCGv temp
= tcg_const_i32(con
);
3055 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
3056 tcg_temp_free(temp
);
3059 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
3060 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
3062 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
3063 tcg_gen_neg_tl(ret
, ret
);
3066 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
3068 TCGv b0
= tcg_temp_new();
3069 TCGv b1
= tcg_temp_new();
3070 TCGv b2
= tcg_temp_new();
3071 TCGv b3
= tcg_temp_new();
3074 tcg_gen_andi_tl(b0
, r1
, 0xff);
3075 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
3078 tcg_gen_andi_tl(b1
, r1
, 0xff00);
3079 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
3082 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
3083 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
3086 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
3087 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
3090 tcg_gen_or_tl(ret
, b0
, b1
);
3091 tcg_gen_or_tl(ret
, ret
, b2
);
3092 tcg_gen_or_tl(ret
, ret
, b3
);
3100 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
3102 TCGv h0
= tcg_temp_new();
3103 TCGv h1
= tcg_temp_new();
3106 tcg_gen_andi_tl(h0
, r1
, 0xffff);
3107 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
3110 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
3111 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
3114 tcg_gen_or_tl(ret
, h0
, h1
);
3119 /* mask = ((1 << width) -1) << pos;
3120 ret = (r1 & ~mask) | (r2 << pos) & mask); */
3121 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
3123 TCGv mask
= tcg_temp_new();
3124 TCGv temp
= tcg_temp_new();
3125 TCGv temp2
= tcg_temp_new();
3127 tcg_gen_movi_tl(mask
, 1);
3128 tcg_gen_shl_tl(mask
, mask
, width
);
3129 tcg_gen_subi_tl(mask
, mask
, 1);
3130 tcg_gen_shl_tl(mask
, mask
, pos
);
3132 tcg_gen_shl_tl(temp
, r2
, pos
);
3133 tcg_gen_and_tl(temp
, temp
, mask
);
3134 tcg_gen_andc_tl(temp2
, r1
, mask
);
3135 tcg_gen_or_tl(ret
, temp
, temp2
);
3137 tcg_temp_free(mask
);
3138 tcg_temp_free(temp
);
3139 tcg_temp_free(temp2
);
3142 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
3144 TCGv_i64 temp
= tcg_temp_new_i64();
3146 gen_helper_bsplit(temp
, r1
);
3147 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
3149 tcg_temp_free_i64(temp
);
3152 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
3154 TCGv_i64 temp
= tcg_temp_new_i64();
3156 gen_helper_unpack(temp
, r1
);
3157 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
3159 tcg_temp_free_i64(temp
);
3163 gen_dvinit_b(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
3165 TCGv_i64 ret
= tcg_temp_new_i64();
3167 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
3168 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
3170 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
3172 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
3174 tcg_temp_free_i64(ret
);
3178 gen_dvinit_h(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
3180 TCGv_i64 ret
= tcg_temp_new_i64();
3182 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
3183 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
3185 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
3187 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
3189 tcg_temp_free_i64(ret
);
3192 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
3194 TCGv temp
= tcg_temp_new();
3196 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
3197 tcg_gen_xor_tl(temp
, temp
, arg_low
);
3198 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
3199 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
3200 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
3202 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3203 tcg_gen_movi_tl(cpu_PSW_V
, 0);
3204 tcg_temp_free(temp
);
3207 static void gen_calc_usb_mulr_h(TCGv arg
)
3209 TCGv temp
= tcg_temp_new();
3211 tcg_gen_add_tl(temp
, arg
, arg
);
3212 tcg_gen_xor_tl(temp
, temp
, arg
);
3213 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
3214 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
3216 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3218 tcg_gen_movi_tl(cpu_PSW_V
, 0);
3219 tcg_temp_free(temp
);
3222 /* helpers for generating program flow micro-ops */
3224 static inline void gen_save_pc(target_ulong pc
)
3226 tcg_gen_movi_tl(cpu_PC
, pc
);
3229 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
3231 TranslationBlock
*tb
;
3233 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
3234 likely(!ctx
->singlestep_enabled
)) {
3237 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
3240 if (ctx
->singlestep_enabled
) {
3241 /* raise exception debug */
3247 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
3248 TCGv r2
, int16_t address
)
3250 TCGLabel
*jumpLabel
= gen_new_label();
3251 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
3253 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
3255 gen_set_label(jumpLabel
);
3256 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
3259 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
3260 int r2
, int16_t address
)
3262 TCGv temp
= tcg_const_i32(r2
);
3263 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
3264 tcg_temp_free(temp
);
3267 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
3269 TCGLabel
*l1
= gen_new_label();
3271 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
3272 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
3273 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
3275 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
3278 static void gen_fcall_save_ctx(DisasContext
*ctx
)
3280 TCGv temp
= tcg_temp_new();
3282 tcg_gen_addi_tl(temp
, cpu_gpr_a
[10], -4);
3283 tcg_gen_qemu_st_tl(cpu_gpr_a
[11], temp
, ctx
->mem_idx
, MO_LESL
);
3284 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3285 tcg_gen_mov_tl(cpu_gpr_a
[10], temp
);
3287 tcg_temp_free(temp
);
3290 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
3291 int r2
, int32_t constant
, int32_t offset
)
3297 /* SB-format jumps */
3300 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3302 case OPC1_32_B_CALL
:
3303 case OPC1_16_SB_CALL
:
3304 gen_helper_1arg(call
, ctx
->next_pc
);
3305 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3308 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
3310 case OPC1_16_SB_JNZ
:
3311 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
3313 /* SBC-format jumps */
3314 case OPC1_16_SBC_JEQ
:
3315 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
3317 case OPC1_16_SBC_JNE
:
3318 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
3320 /* SBRN-format jumps */
3321 case OPC1_16_SBRN_JZ_T
:
3322 temp
= tcg_temp_new();
3323 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
3324 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3325 tcg_temp_free(temp
);
3327 case OPC1_16_SBRN_JNZ_T
:
3328 temp
= tcg_temp_new();
3329 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
3330 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3331 tcg_temp_free(temp
);
3333 /* SBR-format jumps */
3334 case OPC1_16_SBR_JEQ
:
3335 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
3338 case OPC1_16_SBR_JNE
:
3339 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
3342 case OPC1_16_SBR_JNZ
:
3343 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
3345 case OPC1_16_SBR_JNZ_A
:
3346 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3348 case OPC1_16_SBR_JGEZ
:
3349 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
3351 case OPC1_16_SBR_JGTZ
:
3352 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
3354 case OPC1_16_SBR_JLEZ
:
3355 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
3357 case OPC1_16_SBR_JLTZ
:
3358 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
3360 case OPC1_16_SBR_JZ
:
3361 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
3363 case OPC1_16_SBR_JZ_A
:
3364 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3366 case OPC1_16_SBR_LOOP
:
3367 gen_loop(ctx
, r1
, offset
* 2 - 32);
3369 /* SR-format jumps */
3371 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
3374 case OPC2_32_SYS_RET
:
3375 case OPC2_16_SR_RET
:
3376 gen_helper_ret(cpu_env
);
3380 case OPC1_32_B_CALLA
:
3381 gen_helper_1arg(call
, ctx
->next_pc
);
3382 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3384 case OPC1_32_B_FCALL
:
3385 gen_fcall_save_ctx(ctx
);
3386 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3388 case OPC1_32_B_FCALLA
:
3389 gen_fcall_save_ctx(ctx
);
3390 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3393 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3396 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3399 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
3400 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3403 case OPCM_32_BRC_EQ_NEQ
:
3404 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3405 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3407 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3410 case OPCM_32_BRC_GE
:
3411 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3412 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3414 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3415 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3419 case OPCM_32_BRC_JLT
:
3420 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3421 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3423 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3424 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3428 case OPCM_32_BRC_JNE
:
3429 temp
= tcg_temp_new();
3430 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3431 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3432 /* subi is unconditional */
3433 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3434 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3436 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3437 /* addi is unconditional */
3438 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3439 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3441 tcg_temp_free(temp
);
3444 case OPCM_32_BRN_JTT
:
3445 n
= MASK_OP_BRN_N(ctx
->opcode
);
3447 temp
= tcg_temp_new();
3448 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3450 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3451 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3453 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3455 tcg_temp_free(temp
);
3458 case OPCM_32_BRR_EQ_NEQ
:
3459 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3460 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3463 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3467 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3468 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3469 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3472 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3476 case OPCM_32_BRR_GE
:
3477 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3478 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3481 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3485 case OPCM_32_BRR_JLT
:
3486 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3487 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3490 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3494 case OPCM_32_BRR_LOOP
:
3495 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3496 gen_loop(ctx
, r2
, offset
* 2);
3498 /* OPC2_32_BRR_LOOPU */
3499 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
3502 case OPCM_32_BRR_JNE
:
3503 temp
= tcg_temp_new();
3504 temp2
= tcg_temp_new();
3505 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3506 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3507 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3508 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3509 /* subi is unconditional */
3510 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3511 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3513 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3514 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3515 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3516 /* addi is unconditional */
3517 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3518 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3520 tcg_temp_free(temp
);
3521 tcg_temp_free(temp2
);
3523 case OPCM_32_BRR_JNZ
:
3524 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3525 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3527 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3531 printf("Branch Error at %x\n", ctx
->pc
);
3533 ctx
->bstate
= BS_BRANCH
;
3538 * Functions for decoding instructions
3541 static void decode_src_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int op1
)
3547 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3548 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3551 case OPC1_16_SRC_ADD
:
3552 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3554 case OPC1_16_SRC_ADD_A15
:
3555 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3557 case OPC1_16_SRC_ADD_15A
:
3558 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3560 case OPC1_16_SRC_ADD_A
:
3561 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3563 case OPC1_16_SRC_CADD
:
3564 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3567 case OPC1_16_SRC_CADDN
:
3568 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3571 case OPC1_16_SRC_CMOV
:
3572 temp
= tcg_const_tl(0);
3573 temp2
= tcg_const_tl(const4
);
3574 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3575 temp2
, cpu_gpr_d
[r1
]);
3576 tcg_temp_free(temp
);
3577 tcg_temp_free(temp2
);
3579 case OPC1_16_SRC_CMOVN
:
3580 temp
= tcg_const_tl(0);
3581 temp2
= tcg_const_tl(const4
);
3582 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3583 temp2
, cpu_gpr_d
[r1
]);
3584 tcg_temp_free(temp
);
3585 tcg_temp_free(temp2
);
3587 case OPC1_16_SRC_EQ
:
3588 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3591 case OPC1_16_SRC_LT
:
3592 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3595 case OPC1_16_SRC_MOV
:
3596 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3598 case OPC1_16_SRC_MOV_A
:
3599 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3600 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3602 case OPC1_16_SRC_MOV_E
:
3603 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
3604 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3605 tcg_gen_sari_tl(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], 31);
3606 } /* TODO: else raise illegal opcode trap */
3608 case OPC1_16_SRC_SH
:
3609 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3611 case OPC1_16_SRC_SHA
:
3612 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3617 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3622 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3623 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3626 case OPC1_16_SRR_ADD
:
3627 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3629 case OPC1_16_SRR_ADD_A15
:
3630 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3632 case OPC1_16_SRR_ADD_15A
:
3633 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3635 case OPC1_16_SRR_ADD_A
:
3636 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3638 case OPC1_16_SRR_ADDS
:
3639 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3641 case OPC1_16_SRR_AND
:
3642 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3644 case OPC1_16_SRR_CMOV
:
3645 temp
= tcg_const_tl(0);
3646 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3647 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3648 tcg_temp_free(temp
);
3650 case OPC1_16_SRR_CMOVN
:
3651 temp
= tcg_const_tl(0);
3652 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3653 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3654 tcg_temp_free(temp
);
3656 case OPC1_16_SRR_EQ
:
3657 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3660 case OPC1_16_SRR_LT
:
3661 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3664 case OPC1_16_SRR_MOV
:
3665 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3667 case OPC1_16_SRR_MOV_A
:
3668 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3670 case OPC1_16_SRR_MOV_AA
:
3671 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3673 case OPC1_16_SRR_MOV_D
:
3674 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3676 case OPC1_16_SRR_MUL
:
3677 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3679 case OPC1_16_SRR_OR
:
3680 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3682 case OPC1_16_SRR_SUB
:
3683 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3685 case OPC1_16_SRR_SUB_A15B
:
3686 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3688 case OPC1_16_SRR_SUB_15AB
:
3689 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3691 case OPC1_16_SRR_SUBS
:
3692 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3694 case OPC1_16_SRR_XOR
:
3695 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3700 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3704 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3705 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3708 case OPC1_16_SSR_ST_A
:
3709 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3711 case OPC1_16_SSR_ST_A_POSTINC
:
3712 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3713 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3715 case OPC1_16_SSR_ST_B
:
3716 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3718 case OPC1_16_SSR_ST_B_POSTINC
:
3719 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3720 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3722 case OPC1_16_SSR_ST_H
:
3723 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3725 case OPC1_16_SSR_ST_H_POSTINC
:
3726 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3727 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3729 case OPC1_16_SSR_ST_W
:
3730 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3732 case OPC1_16_SSR_ST_W_POSTINC
:
3733 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3734 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3739 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3743 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3746 case OPC1_16_SC_AND
:
3747 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3749 case OPC1_16_SC_BISR
:
3750 gen_helper_1arg(bisr
, const16
& 0xff);
3752 case OPC1_16_SC_LD_A
:
3753 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3755 case OPC1_16_SC_LD_W
:
3756 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3758 case OPC1_16_SC_MOV
:
3759 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3762 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3764 case OPC1_16_SC_ST_A
:
3765 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3767 case OPC1_16_SC_ST_W
:
3768 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3770 case OPC1_16_SC_SUB_A
:
3771 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3776 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3780 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3781 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3785 case OPC1_16_SLR_LD_A
:
3786 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3788 case OPC1_16_SLR_LD_A_POSTINC
:
3789 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3790 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3792 case OPC1_16_SLR_LD_BU
:
3793 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3795 case OPC1_16_SLR_LD_BU_POSTINC
:
3796 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3797 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3799 case OPC1_16_SLR_LD_H
:
3800 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3802 case OPC1_16_SLR_LD_H_POSTINC
:
3803 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3804 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3806 case OPC1_16_SLR_LD_W
:
3807 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3809 case OPC1_16_SLR_LD_W_POSTINC
:
3810 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3811 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3816 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3821 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3822 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3826 case OPC1_16_SRO_LD_A
:
3827 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3829 case OPC1_16_SRO_LD_BU
:
3830 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3832 case OPC1_16_SRO_LD_H
:
3833 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
3835 case OPC1_16_SRO_LD_W
:
3836 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3838 case OPC1_16_SRO_ST_A
:
3839 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3841 case OPC1_16_SRO_ST_B
:
3842 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3844 case OPC1_16_SRO_ST_H
:
3845 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3847 case OPC1_16_SRO_ST_W
:
3848 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3853 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
3856 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3859 case OPC2_16_SR_NOP
:
3861 case OPC2_16_SR_RET
:
3862 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
3864 case OPC2_16_SR_RFE
:
3865 gen_helper_rfe(cpu_env
);
3867 ctx
->bstate
= BS_BRANCH
;
3869 case OPC2_16_SR_DEBUG
:
3870 /* raise EXCP_DEBUG */
3875 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
3881 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3882 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
3885 case OPC2_16_SR_RSUB
:
3886 /* overflow only if r1 = -0x80000000 */
3887 temp
= tcg_const_i32(-0x80000000);
3889 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
3890 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
3892 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
3894 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3896 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3897 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
3899 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
3900 tcg_temp_free(temp
);
3902 case OPC2_16_SR_SAT_B
:
3903 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
3905 case OPC2_16_SR_SAT_BU
:
3906 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
3908 case OPC2_16_SR_SAT_H
:
3909 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
3911 case OPC2_16_SR_SAT_HU
:
3912 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
3917 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
3925 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3927 /* handle ADDSC.A opcode only being 6 bit long */
3928 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3929 op1
= OPC1_16_SRRS_ADDSC_A
;
3933 case OPC1_16_SRC_ADD
:
3934 case OPC1_16_SRC_ADD_A15
:
3935 case OPC1_16_SRC_ADD_15A
:
3936 case OPC1_16_SRC_ADD_A
:
3937 case OPC1_16_SRC_CADD
:
3938 case OPC1_16_SRC_CADDN
:
3939 case OPC1_16_SRC_CMOV
:
3940 case OPC1_16_SRC_CMOVN
:
3941 case OPC1_16_SRC_EQ
:
3942 case OPC1_16_SRC_LT
:
3943 case OPC1_16_SRC_MOV
:
3944 case OPC1_16_SRC_MOV_A
:
3945 case OPC1_16_SRC_MOV_E
:
3946 case OPC1_16_SRC_SH
:
3947 case OPC1_16_SRC_SHA
:
3948 decode_src_opc(env
, ctx
, op1
);
3951 case OPC1_16_SRR_ADD
:
3952 case OPC1_16_SRR_ADD_A15
:
3953 case OPC1_16_SRR_ADD_15A
:
3954 case OPC1_16_SRR_ADD_A
:
3955 case OPC1_16_SRR_ADDS
:
3956 case OPC1_16_SRR_AND
:
3957 case OPC1_16_SRR_CMOV
:
3958 case OPC1_16_SRR_CMOVN
:
3959 case OPC1_16_SRR_EQ
:
3960 case OPC1_16_SRR_LT
:
3961 case OPC1_16_SRR_MOV
:
3962 case OPC1_16_SRR_MOV_A
:
3963 case OPC1_16_SRR_MOV_AA
:
3964 case OPC1_16_SRR_MOV_D
:
3965 case OPC1_16_SRR_MUL
:
3966 case OPC1_16_SRR_OR
:
3967 case OPC1_16_SRR_SUB
:
3968 case OPC1_16_SRR_SUB_A15B
:
3969 case OPC1_16_SRR_SUB_15AB
:
3970 case OPC1_16_SRR_SUBS
:
3971 case OPC1_16_SRR_XOR
:
3972 decode_srr_opc(ctx
, op1
);
3975 case OPC1_16_SSR_ST_A
:
3976 case OPC1_16_SSR_ST_A_POSTINC
:
3977 case OPC1_16_SSR_ST_B
:
3978 case OPC1_16_SSR_ST_B_POSTINC
:
3979 case OPC1_16_SSR_ST_H
:
3980 case OPC1_16_SSR_ST_H_POSTINC
:
3981 case OPC1_16_SSR_ST_W
:
3982 case OPC1_16_SSR_ST_W_POSTINC
:
3983 decode_ssr_opc(ctx
, op1
);
3986 case OPC1_16_SRRS_ADDSC_A
:
3987 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
3988 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
3989 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
3990 temp
= tcg_temp_new();
3991 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
3992 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
3993 tcg_temp_free(temp
);
3996 case OPC1_16_SLRO_LD_A
:
3997 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3998 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3999 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4001 case OPC1_16_SLRO_LD_BU
:
4002 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4003 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4004 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
4006 case OPC1_16_SLRO_LD_H
:
4007 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4008 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4009 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
4011 case OPC1_16_SLRO_LD_W
:
4012 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
4013 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
4014 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4017 case OPC1_16_SB_CALL
:
4019 case OPC1_16_SB_JNZ
:
4021 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
4022 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
4025 case OPC1_16_SBC_JEQ
:
4026 case OPC1_16_SBC_JNE
:
4027 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
4028 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
4029 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
4032 case OPC1_16_SBRN_JNZ_T
:
4033 case OPC1_16_SBRN_JZ_T
:
4034 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
4035 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
4036 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
4039 case OPC1_16_SBR_JEQ
:
4040 case OPC1_16_SBR_JGEZ
:
4041 case OPC1_16_SBR_JGTZ
:
4042 case OPC1_16_SBR_JLEZ
:
4043 case OPC1_16_SBR_JLTZ
:
4044 case OPC1_16_SBR_JNE
:
4045 case OPC1_16_SBR_JNZ
:
4046 case OPC1_16_SBR_JNZ_A
:
4047 case OPC1_16_SBR_JZ
:
4048 case OPC1_16_SBR_JZ_A
:
4049 case OPC1_16_SBR_LOOP
:
4050 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
4051 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
4052 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
4055 case OPC1_16_SC_AND
:
4056 case OPC1_16_SC_BISR
:
4057 case OPC1_16_SC_LD_A
:
4058 case OPC1_16_SC_LD_W
:
4059 case OPC1_16_SC_MOV
:
4061 case OPC1_16_SC_ST_A
:
4062 case OPC1_16_SC_ST_W
:
4063 case OPC1_16_SC_SUB_A
:
4064 decode_sc_opc(ctx
, op1
);
4067 case OPC1_16_SLR_LD_A
:
4068 case OPC1_16_SLR_LD_A_POSTINC
:
4069 case OPC1_16_SLR_LD_BU
:
4070 case OPC1_16_SLR_LD_BU_POSTINC
:
4071 case OPC1_16_SLR_LD_H
:
4072 case OPC1_16_SLR_LD_H_POSTINC
:
4073 case OPC1_16_SLR_LD_W
:
4074 case OPC1_16_SLR_LD_W_POSTINC
:
4075 decode_slr_opc(ctx
, op1
);
4078 case OPC1_16_SRO_LD_A
:
4079 case OPC1_16_SRO_LD_BU
:
4080 case OPC1_16_SRO_LD_H
:
4081 case OPC1_16_SRO_LD_W
:
4082 case OPC1_16_SRO_ST_A
:
4083 case OPC1_16_SRO_ST_B
:
4084 case OPC1_16_SRO_ST_H
:
4085 case OPC1_16_SRO_ST_W
:
4086 decode_sro_opc(ctx
, op1
);
4089 case OPC1_16_SSRO_ST_A
:
4090 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4091 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4092 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4094 case OPC1_16_SSRO_ST_B
:
4095 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4096 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4097 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
4099 case OPC1_16_SSRO_ST_H
:
4100 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4101 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4102 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
4104 case OPC1_16_SSRO_ST_W
:
4105 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
4106 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
4107 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
4110 case OPCM_16_SR_SYSTEM
:
4111 decode_sr_system(env
, ctx
);
4113 case OPCM_16_SR_ACCU
:
4114 decode_sr_accu(env
, ctx
);
4117 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
4118 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
4120 case OPC1_16_SR_NOT
:
4121 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
4122 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
/*
 * 32 bit instructions
 */
4132 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
4139 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4140 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4141 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4143 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4146 case OPC2_32_ABS_LD_A
:
4147 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4149 case OPC2_32_ABS_LD_D
:
4150 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4152 case OPC2_32_ABS_LD_DA
:
4153 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4155 case OPC2_32_ABS_LD_W
:
4156 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4160 tcg_temp_free(temp
);
4163 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
4170 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4171 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4172 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4174 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4177 case OPC2_32_ABS_LD_B
:
4178 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
4180 case OPC2_32_ABS_LD_BU
:
4181 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
4183 case OPC2_32_ABS_LD_H
:
4184 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
4186 case OPC2_32_ABS_LD_HU
:
4187 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
4191 tcg_temp_free(temp
);
4194 static void decode_abs_ldst_swap(CPUTriCoreState
*env
, DisasContext
*ctx
)
4201 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4202 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4203 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4205 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4208 case OPC2_32_ABS_LDMST
:
4209 gen_ldmst(ctx
, r1
, temp
);
4211 case OPC2_32_ABS_SWAP_W
:
4212 gen_swap(ctx
, r1
, temp
);
4216 tcg_temp_free(temp
);
4219 static void decode_abs_ldst_context(CPUTriCoreState
*env
, DisasContext
*ctx
)
4224 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4225 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4228 case OPC2_32_ABS_LDLCX
:
4229 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
4231 case OPC2_32_ABS_LDUCX
:
4232 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
4234 case OPC2_32_ABS_STLCX
:
4235 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
4237 case OPC2_32_ABS_STUCX
:
4238 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
4243 static void decode_abs_store(CPUTriCoreState
*env
, DisasContext
*ctx
)
4250 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4251 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4252 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4254 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4257 case OPC2_32_ABS_ST_A
:
4258 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4260 case OPC2_32_ABS_ST_D
:
4261 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4263 case OPC2_32_ABS_ST_DA
:
4264 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4266 case OPC2_32_ABS_ST_W
:
4267 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
4271 tcg_temp_free(temp
);
4274 static void decode_abs_storeb_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
4281 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
4282 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
4283 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
4285 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
4288 case OPC2_32_ABS_ST_B
:
4289 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
4291 case OPC2_32_ABS_ST_H
:
4292 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
4295 tcg_temp_free(temp
);
4300 static void decode_bit_andacc(CPUTriCoreState
*env
, DisasContext
*ctx
)
4306 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4307 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4308 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4309 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4310 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4311 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4315 case OPC2_32_BIT_AND_AND_T
:
4316 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4317 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
4319 case OPC2_32_BIT_AND_ANDN_T
:
4320 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4321 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
4323 case OPC2_32_BIT_AND_NOR_T
:
4324 if (TCG_TARGET_HAS_andc_i32
) {
4325 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4326 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
4328 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4329 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
4332 case OPC2_32_BIT_AND_OR_T
:
4333 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4334 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
4339 static void decode_bit_logical_t(CPUTriCoreState
*env
, DisasContext
*ctx
)
4344 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4345 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4346 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4347 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4348 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4349 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4352 case OPC2_32_BIT_AND_T
:
4353 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4354 pos1
, pos2
, &tcg_gen_and_tl
);
4356 case OPC2_32_BIT_ANDN_T
:
4357 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4358 pos1
, pos2
, &tcg_gen_andc_tl
);
4360 case OPC2_32_BIT_NOR_T
:
4361 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4362 pos1
, pos2
, &tcg_gen_nor_tl
);
4364 case OPC2_32_BIT_OR_T
:
4365 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4366 pos1
, pos2
, &tcg_gen_or_tl
);
4371 static void decode_bit_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
4377 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4378 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4379 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4380 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4381 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4382 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4384 temp
= tcg_temp_new();
4386 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
4387 if (op2
== OPC2_32_BIT_INSN_T
) {
4388 tcg_gen_not_tl(temp
, temp
);
4390 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
4391 tcg_temp_free(temp
);
4394 static void decode_bit_logical_t2(CPUTriCoreState
*env
, DisasContext
*ctx
)
4401 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4402 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4403 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4404 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4405 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4406 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4409 case OPC2_32_BIT_NAND_T
:
4410 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4411 pos1
, pos2
, &tcg_gen_nand_tl
);
4413 case OPC2_32_BIT_ORN_T
:
4414 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4415 pos1
, pos2
, &tcg_gen_orc_tl
);
4417 case OPC2_32_BIT_XNOR_T
:
4418 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4419 pos1
, pos2
, &tcg_gen_eqv_tl
);
4421 case OPC2_32_BIT_XOR_T
:
4422 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4423 pos1
, pos2
, &tcg_gen_xor_tl
);
4428 static void decode_bit_orand(CPUTriCoreState
*env
, DisasContext
*ctx
)
4435 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4436 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4437 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4438 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4439 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4440 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4443 case OPC2_32_BIT_OR_AND_T
:
4444 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4445 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
4447 case OPC2_32_BIT_OR_ANDN_T
:
4448 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4449 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
4451 case OPC2_32_BIT_OR_NOR_T
:
4452 if (TCG_TARGET_HAS_orc_i32
) {
4453 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4454 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
4456 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4457 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
4460 case OPC2_32_BIT_OR_OR_T
:
4461 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4462 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
4467 static void decode_bit_sh_logic1(CPUTriCoreState
*env
, DisasContext
*ctx
)
4474 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4475 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4476 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4477 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4478 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4479 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4481 temp
= tcg_temp_new();
4484 case OPC2_32_BIT_SH_AND_T
:
4485 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4486 pos1
, pos2
, &tcg_gen_and_tl
);
4488 case OPC2_32_BIT_SH_ANDN_T
:
4489 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4490 pos1
, pos2
, &tcg_gen_andc_tl
);
4492 case OPC2_32_BIT_SH_NOR_T
:
4493 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4494 pos1
, pos2
, &tcg_gen_nor_tl
);
4496 case OPC2_32_BIT_SH_OR_T
:
4497 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4498 pos1
, pos2
, &tcg_gen_or_tl
);
4501 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4502 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4503 tcg_temp_free(temp
);
4506 static void decode_bit_sh_logic2(CPUTriCoreState
*env
, DisasContext
*ctx
)
4513 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4514 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4515 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4516 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4517 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4518 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4520 temp
= tcg_temp_new();
4523 case OPC2_32_BIT_SH_NAND_T
:
4524 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
4525 pos1
, pos2
, &tcg_gen_nand_tl
);
4527 case OPC2_32_BIT_SH_ORN_T
:
4528 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4529 pos1
, pos2
, &tcg_gen_orc_tl
);
4531 case OPC2_32_BIT_SH_XNOR_T
:
4532 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4533 pos1
, pos2
, &tcg_gen_eqv_tl
);
4535 case OPC2_32_BIT_SH_XOR_T
:
4536 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4537 pos1
, pos2
, &tcg_gen_xor_tl
);
4540 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
4541 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
4542 tcg_temp_free(temp
);
4548 static void decode_bo_addrmode_post_pre_base(CPUTriCoreState
*env
,
4556 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4557 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4558 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4559 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4562 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
4563 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
4564 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
4565 /* instruction to access the cache */
4567 case OPC2_32_BO_CACHEA_WI_POSTINC
:
4568 case OPC2_32_BO_CACHEA_W_POSTINC
:
4569 case OPC2_32_BO_CACHEA_I_POSTINC
:
4570 /* instruction to access the cache, but we still need to handle
4571 the addressing mode */
4572 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4574 case OPC2_32_BO_CACHEA_WI_PREINC
:
4575 case OPC2_32_BO_CACHEA_W_PREINC
:
4576 case OPC2_32_BO_CACHEA_I_PREINC
:
4577 /* instruction to access the cache, but we still need to handle
4578 the addressing mode */
4579 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4581 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
4582 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
4583 /* TODO: Raise illegal opcode trap,
4584 if !tricore_feature(TRICORE_FEATURE_131) */
4586 case OPC2_32_BO_CACHEI_W_POSTINC
:
4587 case OPC2_32_BO_CACHEI_WI_POSTINC
:
4588 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
4589 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4590 } /* TODO: else raise illegal opcode trap */
4592 case OPC2_32_BO_CACHEI_W_PREINC
:
4593 case OPC2_32_BO_CACHEI_WI_PREINC
:
4594 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
4595 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4596 } /* TODO: else raise illegal opcode trap */
4598 case OPC2_32_BO_ST_A_SHORTOFF
:
4599 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4601 case OPC2_32_BO_ST_A_POSTINC
:
4602 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4604 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4606 case OPC2_32_BO_ST_A_PREINC
:
4607 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4609 case OPC2_32_BO_ST_B_SHORTOFF
:
4610 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4612 case OPC2_32_BO_ST_B_POSTINC
:
4613 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4615 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4617 case OPC2_32_BO_ST_B_PREINC
:
4618 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4620 case OPC2_32_BO_ST_D_SHORTOFF
:
4621 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4624 case OPC2_32_BO_ST_D_POSTINC
:
4625 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4626 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4628 case OPC2_32_BO_ST_D_PREINC
:
4629 temp
= tcg_temp_new();
4630 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4631 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4632 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4633 tcg_temp_free(temp
);
4635 case OPC2_32_BO_ST_DA_SHORTOFF
:
4636 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4639 case OPC2_32_BO_ST_DA_POSTINC
:
4640 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4641 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4643 case OPC2_32_BO_ST_DA_PREINC
:
4644 temp
= tcg_temp_new();
4645 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4646 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4647 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4648 tcg_temp_free(temp
);
4650 case OPC2_32_BO_ST_H_SHORTOFF
:
4651 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4653 case OPC2_32_BO_ST_H_POSTINC
:
4654 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4656 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4658 case OPC2_32_BO_ST_H_PREINC
:
4659 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4661 case OPC2_32_BO_ST_Q_SHORTOFF
:
4662 temp
= tcg_temp_new();
4663 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4664 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4665 tcg_temp_free(temp
);
4667 case OPC2_32_BO_ST_Q_POSTINC
:
4668 temp
= tcg_temp_new();
4669 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4670 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
4672 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4673 tcg_temp_free(temp
);
4675 case OPC2_32_BO_ST_Q_PREINC
:
4676 temp
= tcg_temp_new();
4677 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4678 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4679 tcg_temp_free(temp
);
4681 case OPC2_32_BO_ST_W_SHORTOFF
:
4682 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4684 case OPC2_32_BO_ST_W_POSTINC
:
4685 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4687 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4689 case OPC2_32_BO_ST_W_PREINC
:
4690 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4695 static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState
*env
,
4701 TCGv temp
, temp2
, temp3
;
4703 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4704 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4705 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4706 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4708 temp
= tcg_temp_new();
4709 temp2
= tcg_temp_new();
4710 temp3
= tcg_const_i32(off10
);
4712 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4713 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4716 case OPC2_32_BO_CACHEA_WI_BR
:
4717 case OPC2_32_BO_CACHEA_W_BR
:
4718 case OPC2_32_BO_CACHEA_I_BR
:
4719 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4721 case OPC2_32_BO_CACHEA_WI_CIRC
:
4722 case OPC2_32_BO_CACHEA_W_CIRC
:
4723 case OPC2_32_BO_CACHEA_I_CIRC
:
4724 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4726 case OPC2_32_BO_ST_A_BR
:
4727 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4728 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4730 case OPC2_32_BO_ST_A_CIRC
:
4731 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4732 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4734 case OPC2_32_BO_ST_B_BR
:
4735 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4736 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4738 case OPC2_32_BO_ST_B_CIRC
:
4739 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4740 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4742 case OPC2_32_BO_ST_D_BR
:
4743 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4744 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4746 case OPC2_32_BO_ST_D_CIRC
:
4747 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4748 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4749 tcg_gen_addi_tl(temp
, temp
, 4);
4750 tcg_gen_rem_tl(temp
, temp
, temp2
);
4751 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4752 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4753 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4755 case OPC2_32_BO_ST_DA_BR
:
4756 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4757 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4759 case OPC2_32_BO_ST_DA_CIRC
:
4760 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4761 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4762 tcg_gen_addi_tl(temp
, temp
, 4);
4763 tcg_gen_rem_tl(temp
, temp
, temp2
);
4764 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4765 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4766 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4768 case OPC2_32_BO_ST_H_BR
:
4769 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4770 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4772 case OPC2_32_BO_ST_H_CIRC
:
4773 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4774 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4776 case OPC2_32_BO_ST_Q_BR
:
4777 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4778 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4779 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4781 case OPC2_32_BO_ST_Q_CIRC
:
4782 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4783 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4784 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4786 case OPC2_32_BO_ST_W_BR
:
4787 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4788 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4790 case OPC2_32_BO_ST_W_CIRC
:
4791 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4792 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4795 tcg_temp_free(temp
);
4796 tcg_temp_free(temp2
);
4797 tcg_temp_free(temp3
);
4800 static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState
*env
,
4808 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4809 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4810 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4811 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4814 case OPC2_32_BO_LD_A_SHORTOFF
:
4815 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4817 case OPC2_32_BO_LD_A_POSTINC
:
4818 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4820 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4822 case OPC2_32_BO_LD_A_PREINC
:
4823 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4825 case OPC2_32_BO_LD_B_SHORTOFF
:
4826 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4828 case OPC2_32_BO_LD_B_POSTINC
:
4829 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4831 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4833 case OPC2_32_BO_LD_B_PREINC
:
4834 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4836 case OPC2_32_BO_LD_BU_SHORTOFF
:
4837 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4839 case OPC2_32_BO_LD_BU_POSTINC
:
4840 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4842 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4844 case OPC2_32_BO_LD_BU_PREINC
:
4845 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4847 case OPC2_32_BO_LD_D_SHORTOFF
:
4848 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4851 case OPC2_32_BO_LD_D_POSTINC
:
4852 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4853 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4855 case OPC2_32_BO_LD_D_PREINC
:
4856 temp
= tcg_temp_new();
4857 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4858 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4859 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4860 tcg_temp_free(temp
);
4862 case OPC2_32_BO_LD_DA_SHORTOFF
:
4863 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4866 case OPC2_32_BO_LD_DA_POSTINC
:
4867 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4868 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4870 case OPC2_32_BO_LD_DA_PREINC
:
4871 temp
= tcg_temp_new();
4872 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4873 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4874 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4875 tcg_temp_free(temp
);
4877 case OPC2_32_BO_LD_H_SHORTOFF
:
4878 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4880 case OPC2_32_BO_LD_H_POSTINC
:
4881 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4883 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4885 case OPC2_32_BO_LD_H_PREINC
:
4886 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4888 case OPC2_32_BO_LD_HU_SHORTOFF
:
4889 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4891 case OPC2_32_BO_LD_HU_POSTINC
:
4892 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4894 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4896 case OPC2_32_BO_LD_HU_PREINC
:
4897 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4899 case OPC2_32_BO_LD_Q_SHORTOFF
:
4900 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4901 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4903 case OPC2_32_BO_LD_Q_POSTINC
:
4904 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4906 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4907 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4909 case OPC2_32_BO_LD_Q_PREINC
:
4910 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4911 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4913 case OPC2_32_BO_LD_W_SHORTOFF
:
4914 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4916 case OPC2_32_BO_LD_W_POSTINC
:
4917 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4919 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4921 case OPC2_32_BO_LD_W_PREINC
:
4922 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4927 static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState
*env
,
4934 TCGv temp
, temp2
, temp3
;
4936 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4937 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4938 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4939 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4941 temp
= tcg_temp_new();
4942 temp2
= tcg_temp_new();
4943 temp3
= tcg_const_i32(off10
);
4945 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4946 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4950 case OPC2_32_BO_LD_A_BR
:
4951 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4952 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4954 case OPC2_32_BO_LD_A_CIRC
:
4955 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4956 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4958 case OPC2_32_BO_LD_B_BR
:
4959 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4960 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4962 case OPC2_32_BO_LD_B_CIRC
:
4963 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4964 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4966 case OPC2_32_BO_LD_BU_BR
:
4967 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4968 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4970 case OPC2_32_BO_LD_BU_CIRC
:
4971 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4972 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4974 case OPC2_32_BO_LD_D_BR
:
4975 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4976 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4978 case OPC2_32_BO_LD_D_CIRC
:
4979 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4980 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4981 tcg_gen_addi_tl(temp
, temp
, 4);
4982 tcg_gen_rem_tl(temp
, temp
, temp2
);
4983 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4984 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4985 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4987 case OPC2_32_BO_LD_DA_BR
:
4988 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4989 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4991 case OPC2_32_BO_LD_DA_CIRC
:
4992 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4993 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4994 tcg_gen_addi_tl(temp
, temp
, 4);
4995 tcg_gen_rem_tl(temp
, temp
, temp2
);
4996 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4997 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4998 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5000 case OPC2_32_BO_LD_H_BR
:
5001 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
5002 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5004 case OPC2_32_BO_LD_H_CIRC
:
5005 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
5006 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5008 case OPC2_32_BO_LD_HU_BR
:
5009 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5010 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5012 case OPC2_32_BO_LD_HU_CIRC
:
5013 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5014 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5016 case OPC2_32_BO_LD_Q_BR
:
5017 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5018 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
5019 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5021 case OPC2_32_BO_LD_Q_CIRC
:
5022 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
5023 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
5024 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5026 case OPC2_32_BO_LD_W_BR
:
5027 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
5028 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5030 case OPC2_32_BO_LD_W_CIRC
:
5031 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
5032 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5035 tcg_temp_free(temp
);
5036 tcg_temp_free(temp2
);
5037 tcg_temp_free(temp3
);
5040 static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState
*env
,
5049 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
5050 r2
= MASK_OP_BO_S2(ctx
->opcode
);
5051 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
5052 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
5055 temp
= tcg_temp_new();
5056 temp2
= tcg_temp_new();
5059 case OPC2_32_BO_LDLCX_SHORTOFF
:
5060 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5061 gen_helper_ldlcx(cpu_env
, temp
);
5063 case OPC2_32_BO_LDMST_SHORTOFF
:
5064 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5065 gen_ldmst(ctx
, r1
, temp
);
5067 case OPC2_32_BO_LDMST_POSTINC
:
5068 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
5069 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5071 case OPC2_32_BO_LDMST_PREINC
:
5072 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5073 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
5075 case OPC2_32_BO_LDUCX_SHORTOFF
:
5076 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5077 gen_helper_lducx(cpu_env
, temp
);
5079 case OPC2_32_BO_LEA_SHORTOFF
:
5080 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
5082 case OPC2_32_BO_STLCX_SHORTOFF
:
5083 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5084 gen_helper_stlcx(cpu_env
, temp
);
5086 case OPC2_32_BO_STUCX_SHORTOFF
:
5087 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5088 gen_helper_stucx(cpu_env
, temp
);
5090 case OPC2_32_BO_SWAP_W_SHORTOFF
:
5091 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5092 gen_swap(ctx
, r1
, temp
);
5094 case OPC2_32_BO_SWAP_W_POSTINC
:
5095 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
5096 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5098 case OPC2_32_BO_SWAP_W_PREINC
:
5099 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5100 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
5102 case OPC2_32_BO_CMPSWAP_W_SHORTOFF
:
5103 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5104 gen_cmpswap(ctx
, r1
, temp
);
5106 case OPC2_32_BO_CMPSWAP_W_POSTINC
:
5107 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
5108 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5110 case OPC2_32_BO_CMPSWAP_W_PREINC
:
5111 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5112 gen_cmpswap(ctx
, r1
, cpu_gpr_a
[r2
]);
5114 case OPC2_32_BO_SWAPMSK_W_SHORTOFF
:
5115 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
5116 gen_swapmsk(ctx
, r1
, temp
);
5118 case OPC2_32_BO_SWAPMSK_W_POSTINC
:
5119 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
5120 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5122 case OPC2_32_BO_SWAPMSK_W_PREINC
:
5123 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
5124 gen_swapmsk(ctx
, r1
, cpu_gpr_a
[r2
]);
5127 tcg_temp_free(temp
);
5128 tcg_temp_free(temp2
);
5131 static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState
*env
,
5138 TCGv temp
, temp2
, temp3
;
5140 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
5141 r2
= MASK_OP_BO_S2(ctx
->opcode
);
5142 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
5143 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
5145 temp
= tcg_temp_new();
5146 temp2
= tcg_temp_new();
5147 temp3
= tcg_const_i32(off10
);
5149 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
5150 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
5153 case OPC2_32_BO_LDMST_BR
:
5154 gen_ldmst(ctx
, r1
, temp2
);
5155 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5157 case OPC2_32_BO_LDMST_CIRC
:
5158 gen_ldmst(ctx
, r1
, temp2
);
5159 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5161 case OPC2_32_BO_SWAP_W_BR
:
5162 gen_swap(ctx
, r1
, temp2
);
5163 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5165 case OPC2_32_BO_SWAP_W_CIRC
:
5166 gen_swap(ctx
, r1
, temp2
);
5167 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5169 case OPC2_32_BO_CMPSWAP_W_BR
:
5170 gen_cmpswap(ctx
, r1
, temp2
);
5171 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5173 case OPC2_32_BO_CMPSWAP_W_CIRC
:
5174 gen_cmpswap(ctx
, r1
, temp2
);
5175 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5177 case OPC2_32_BO_SWAPMSK_W_BR
:
5178 gen_swapmsk(ctx
, r1
, temp2
);
5179 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
5181 case OPC2_32_BO_SWAPMSK_W_CIRC
:
5182 gen_swapmsk(ctx
, r1
, temp2
);
5183 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
5187 tcg_temp_free(temp
);
5188 tcg_temp_free(temp2
);
5189 tcg_temp_free(temp3
);
5192 static void decode_bol_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int32_t op1
)
5198 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
5199 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
5200 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
5203 case OPC1_32_BOL_LD_A_LONGOFF
:
5204 temp
= tcg_temp_new();
5205 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
5206 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
5207 tcg_temp_free(temp
);
5209 case OPC1_32_BOL_LD_W_LONGOFF
:
5210 temp
= tcg_temp_new();
5211 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
5212 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
5213 tcg_temp_free(temp
);
5215 case OPC1_32_BOL_LEA_LONGOFF
:
5216 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
5218 case OPC1_32_BOL_ST_A_LONGOFF
:
5219 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5220 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
5222 /* raise illegal opcode trap */
5225 case OPC1_32_BOL_ST_W_LONGOFF
:
5226 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
5228 case OPC1_32_BOL_LD_B_LONGOFF
:
5229 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5230 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
5232 /* raise illegal opcode trap */
5235 case OPC1_32_BOL_LD_BU_LONGOFF
:
5236 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5237 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
5239 /* raise illegal opcode trap */
5242 case OPC1_32_BOL_LD_H_LONGOFF
:
5243 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5244 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
5246 /* raise illegal opcode trap */
5249 case OPC1_32_BOL_LD_HU_LONGOFF
:
5250 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5251 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
5253 /* raise illegal opcode trap */
5256 case OPC1_32_BOL_ST_B_LONGOFF
:
5257 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5258 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
5260 /* raise illegal opcode trap */
5263 case OPC1_32_BOL_ST_H_LONGOFF
:
5264 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5265 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
5267 /* raise illegal opcode trap */
5274 static void decode_rc_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
5281 r2
= MASK_OP_RC_D(ctx
->opcode
);
5282 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5283 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5284 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5286 temp
= tcg_temp_new();
5289 case OPC2_32_RC_AND
:
5290 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5292 case OPC2_32_RC_ANDN
:
5293 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
5295 case OPC2_32_RC_NAND
:
5296 tcg_gen_movi_tl(temp
, const9
);
5297 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
5299 case OPC2_32_RC_NOR
:
5300 tcg_gen_movi_tl(temp
, const9
);
5301 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
5304 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5306 case OPC2_32_RC_ORN
:
5307 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
5310 const9
= sextract32(const9
, 0, 6);
5311 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5313 case OPC2_32_RC_SH_H
:
5314 const9
= sextract32(const9
, 0, 5);
5315 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5317 case OPC2_32_RC_SHA
:
5318 const9
= sextract32(const9
, 0, 6);
5319 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5321 case OPC2_32_RC_SHA_H
:
5322 const9
= sextract32(const9
, 0, 5);
5323 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5325 case OPC2_32_RC_SHAS
:
5326 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5328 case OPC2_32_RC_XNOR
:
5329 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5330 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
5332 case OPC2_32_RC_XOR
:
5333 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5336 tcg_temp_free(temp
);
5339 static void decode_rc_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
5347 r2
= MASK_OP_RC_D(ctx
->opcode
);
5348 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5349 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5351 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5353 temp
= tcg_temp_new();
5356 case OPC2_32_RC_ABSDIF
:
5357 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5359 case OPC2_32_RC_ABSDIFS
:
5360 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5362 case OPC2_32_RC_ADD
:
5363 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5365 case OPC2_32_RC_ADDC
:
5366 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5368 case OPC2_32_RC_ADDS
:
5369 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5371 case OPC2_32_RC_ADDS_U
:
5372 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5374 case OPC2_32_RC_ADDX
:
5375 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5377 case OPC2_32_RC_AND_EQ
:
5378 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5379 const9
, &tcg_gen_and_tl
);
5381 case OPC2_32_RC_AND_GE
:
5382 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5383 const9
, &tcg_gen_and_tl
);
5385 case OPC2_32_RC_AND_GE_U
:
5386 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5387 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5388 const9
, &tcg_gen_and_tl
);
5390 case OPC2_32_RC_AND_LT
:
5391 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5392 const9
, &tcg_gen_and_tl
);
5394 case OPC2_32_RC_AND_LT_U
:
5395 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5396 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5397 const9
, &tcg_gen_and_tl
);
5399 case OPC2_32_RC_AND_NE
:
5400 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5401 const9
, &tcg_gen_and_tl
);
5404 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5406 case OPC2_32_RC_EQANY_B
:
5407 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5409 case OPC2_32_RC_EQANY_H
:
5410 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5413 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5415 case OPC2_32_RC_GE_U
:
5416 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5417 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5420 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5422 case OPC2_32_RC_LT_U
:
5423 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5424 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5426 case OPC2_32_RC_MAX
:
5427 tcg_gen_movi_tl(temp
, const9
);
5428 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5429 cpu_gpr_d
[r1
], temp
);
5431 case OPC2_32_RC_MAX_U
:
5432 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5433 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5434 cpu_gpr_d
[r1
], temp
);
5436 case OPC2_32_RC_MIN
:
5437 tcg_gen_movi_tl(temp
, const9
);
5438 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5439 cpu_gpr_d
[r1
], temp
);
5441 case OPC2_32_RC_MIN_U
:
5442 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5443 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5444 cpu_gpr_d
[r1
], temp
);
5447 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5449 case OPC2_32_RC_OR_EQ
:
5450 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5451 const9
, &tcg_gen_or_tl
);
5453 case OPC2_32_RC_OR_GE
:
5454 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5455 const9
, &tcg_gen_or_tl
);
5457 case OPC2_32_RC_OR_GE_U
:
5458 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5459 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5460 const9
, &tcg_gen_or_tl
);
5462 case OPC2_32_RC_OR_LT
:
5463 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5464 const9
, &tcg_gen_or_tl
);
5466 case OPC2_32_RC_OR_LT_U
:
5467 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5468 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5469 const9
, &tcg_gen_or_tl
);
5471 case OPC2_32_RC_OR_NE
:
5472 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5473 const9
, &tcg_gen_or_tl
);
5475 case OPC2_32_RC_RSUB
:
5476 tcg_gen_movi_tl(temp
, const9
);
5477 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5479 case OPC2_32_RC_RSUBS
:
5480 tcg_gen_movi_tl(temp
, const9
);
5481 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5483 case OPC2_32_RC_RSUBS_U
:
5484 tcg_gen_movi_tl(temp
, const9
);
5485 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5487 case OPC2_32_RC_SH_EQ
:
5488 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5490 case OPC2_32_RC_SH_GE
:
5491 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5493 case OPC2_32_RC_SH_GE_U
:
5494 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5495 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5497 case OPC2_32_RC_SH_LT
:
5498 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5500 case OPC2_32_RC_SH_LT_U
:
5501 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5502 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5504 case OPC2_32_RC_SH_NE
:
5505 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5507 case OPC2_32_RC_XOR_EQ
:
5508 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5509 const9
, &tcg_gen_xor_tl
);
5511 case OPC2_32_RC_XOR_GE
:
5512 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5513 const9
, &tcg_gen_xor_tl
);
5515 case OPC2_32_RC_XOR_GE_U
:
5516 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5517 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5518 const9
, &tcg_gen_xor_tl
);
5520 case OPC2_32_RC_XOR_LT
:
5521 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5522 const9
, &tcg_gen_xor_tl
);
5524 case OPC2_32_RC_XOR_LT_U
:
5525 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5526 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5527 const9
, &tcg_gen_xor_tl
);
5529 case OPC2_32_RC_XOR_NE
:
5530 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5531 const9
, &tcg_gen_xor_tl
);
5534 tcg_temp_free(temp
);
5537 static void decode_rc_serviceroutine(CPUTriCoreState
*env
, DisasContext
*ctx
)
5542 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5543 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5546 case OPC2_32_RC_BISR
:
5547 gen_helper_1arg(bisr
, const9
);
5549 case OPC2_32_RC_SYSCALL
:
5550 /* TODO: Add exception generation */
5555 static void decode_rc_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
5561 r2
= MASK_OP_RC_D(ctx
->opcode
);
5562 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5563 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5565 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5568 case OPC2_32_RC_MUL_32
:
5569 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5571 case OPC2_32_RC_MUL_64
:
5572 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5574 case OPC2_32_RC_MULS_32
:
5575 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5577 case OPC2_32_RC_MUL_U_64
:
5578 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5579 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
5581 case OPC2_32_RC_MULS_U_32
:
5582 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5583 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5589 static void decode_rcpw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
5593 int32_t pos
, width
, const4
;
5597 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
5598 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
5599 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
5600 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
5601 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
5602 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
5605 case OPC2_32_RCPW_IMASK
:
5606 /* if pos + width > 31 undefined result */
5607 if (pos
+ width
<= 31) {
5608 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
5609 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
5612 case OPC2_32_RCPW_INSERT
:
5613 /* if pos + width > 32 undefined result */
5614 if (pos
+ width
<= 32) {
5615 temp
= tcg_const_i32(const4
);
5616 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
5617 tcg_temp_free(temp
);
5625 static void decode_rcrw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
5629 int32_t width
, const4
;
5631 TCGv temp
, temp2
, temp3
;
5633 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
5634 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
5635 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
5636 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
5637 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
5638 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
5640 temp
= tcg_temp_new();
5641 temp2
= tcg_temp_new();
5644 case OPC2_32_RCRW_IMASK
:
5645 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r4
], 0x1f);
5646 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
5647 tcg_gen_shl_tl(cpu_gpr_d
[r3
+ 1], temp2
, temp
);
5648 tcg_gen_movi_tl(temp2
, const4
);
5649 tcg_gen_shl_tl(cpu_gpr_d
[r3
], temp2
, temp
);
5651 case OPC2_32_RCRW_INSERT
:
5652 temp3
= tcg_temp_new();
5654 tcg_gen_movi_tl(temp
, width
);
5655 tcg_gen_movi_tl(temp2
, const4
);
5656 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r4
], 0x1f);
5657 gen_insert(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
5659 tcg_temp_free(temp3
);
5662 tcg_temp_free(temp
);
5663 tcg_temp_free(temp2
);
5668 static void decode_rcr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
5676 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5677 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5678 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5679 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5680 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5683 case OPC2_32_RCR_CADD
:
5684 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
5687 case OPC2_32_RCR_CADDN
:
5688 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
5691 case OPC2_32_RCR_SEL
:
5692 temp
= tcg_const_i32(0);
5693 temp2
= tcg_const_i32(const9
);
5694 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5695 cpu_gpr_d
[r1
], temp2
);
5696 tcg_temp_free(temp
);
5697 tcg_temp_free(temp2
);
5699 case OPC2_32_RCR_SELN
:
5700 temp
= tcg_const_i32(0);
5701 temp2
= tcg_const_i32(const9
);
5702 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
5703 cpu_gpr_d
[r1
], temp2
);
5704 tcg_temp_free(temp
);
5705 tcg_temp_free(temp2
);
5710 static void decode_rcr_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
5717 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5718 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5719 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5720 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5721 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5724 case OPC2_32_RCR_MADD_32
:
5725 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5727 case OPC2_32_RCR_MADD_64
:
5728 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5729 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5731 case OPC2_32_RCR_MADDS_32
:
5732 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5734 case OPC2_32_RCR_MADDS_64
:
5735 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5736 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5738 case OPC2_32_RCR_MADD_U_64
:
5739 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5740 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5741 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5743 case OPC2_32_RCR_MADDS_U_32
:
5744 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5745 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5747 case OPC2_32_RCR_MADDS_U_64
:
5748 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5749 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5750 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5755 static void decode_rcr_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
5762 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
5763 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
5764 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
5765 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
5766 r4
= MASK_OP_RCR_D(ctx
->opcode
);
5769 case OPC2_32_RCR_MSUB_32
:
5770 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5772 case OPC2_32_RCR_MSUB_64
:
5773 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5774 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5776 case OPC2_32_RCR_MSUBS_32
:
5777 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5779 case OPC2_32_RCR_MSUBS_64
:
5780 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5781 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5783 case OPC2_32_RCR_MSUB_U_64
:
5784 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5785 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5786 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5788 case OPC2_32_RCR_MSUBS_U_32
:
5789 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5790 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
5792 case OPC2_32_RCR_MSUBS_U_64
:
5793 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
5794 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
5795 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
5802 static void decode_rlc_opc(CPUTriCoreState
*env
, DisasContext
*ctx
,
5808 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
5809 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
5810 r2
= MASK_OP_RLC_D(ctx
->opcode
);
5813 case OPC1_32_RLC_ADDI
:
5814 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
5816 case OPC1_32_RLC_ADDIH
:
5817 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
5819 case OPC1_32_RLC_ADDIH_A
:
5820 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
5822 case OPC1_32_RLC_MFCR
:
5823 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5824 gen_mfcr(env
, cpu_gpr_d
[r2
], const16
);
5826 case OPC1_32_RLC_MOV
:
5827 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5829 case OPC1_32_RLC_MOV_64
:
5830 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
5831 if ((r2
& 0x1) != 0) {
5832 /* TODO: raise OPD trap */
5834 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5835 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
5837 /* TODO: raise illegal opcode trap */
5840 case OPC1_32_RLC_MOV_U
:
5841 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5842 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
5844 case OPC1_32_RLC_MOV_H
:
5845 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
5847 case OPC1_32_RLC_MOVH_A
:
5848 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
5850 case OPC1_32_RLC_MTCR
:
5851 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
5852 gen_mtcr(env
, ctx
, cpu_gpr_d
[r1
], const16
);
5858 static void decode_rr_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
5863 r3
= MASK_OP_RR_D(ctx
->opcode
);
5864 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5865 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5866 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5869 case OPC2_32_RR_ABS
:
5870 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5872 case OPC2_32_RR_ABS_B
:
5873 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5875 case OPC2_32_RR_ABS_H
:
5876 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5878 case OPC2_32_RR_ABSDIF
:
5879 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5881 case OPC2_32_RR_ABSDIF_B
:
5882 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5885 case OPC2_32_RR_ABSDIF_H
:
5886 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5889 case OPC2_32_RR_ABSDIFS
:
5890 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5893 case OPC2_32_RR_ABSDIFS_H
:
5894 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5897 case OPC2_32_RR_ABSS
:
5898 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5900 case OPC2_32_RR_ABSS_H
:
5901 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
5903 case OPC2_32_RR_ADD
:
5904 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5906 case OPC2_32_RR_ADD_B
:
5907 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5909 case OPC2_32_RR_ADD_H
:
5910 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5912 case OPC2_32_RR_ADDC
:
5913 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5915 case OPC2_32_RR_ADDS
:
5916 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5918 case OPC2_32_RR_ADDS_H
:
5919 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5922 case OPC2_32_RR_ADDS_HU
:
5923 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5926 case OPC2_32_RR_ADDS_U
:
5927 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5930 case OPC2_32_RR_ADDX
:
5931 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5933 case OPC2_32_RR_AND_EQ
:
5934 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5935 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5937 case OPC2_32_RR_AND_GE
:
5938 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5939 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5941 case OPC2_32_RR_AND_GE_U
:
5942 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5943 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5945 case OPC2_32_RR_AND_LT
:
5946 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5947 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5949 case OPC2_32_RR_AND_LT_U
:
5950 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5951 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5953 case OPC2_32_RR_AND_NE
:
5954 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5955 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5958 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5961 case OPC2_32_RR_EQ_B
:
5962 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5964 case OPC2_32_RR_EQ_H
:
5965 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5967 case OPC2_32_RR_EQ_W
:
5968 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5970 case OPC2_32_RR_EQANY_B
:
5971 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5973 case OPC2_32_RR_EQANY_H
:
5974 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5977 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5980 case OPC2_32_RR_GE_U
:
5981 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5985 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5988 case OPC2_32_RR_LT_U
:
5989 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5992 case OPC2_32_RR_LT_B
:
5993 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5995 case OPC2_32_RR_LT_BU
:
5996 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5998 case OPC2_32_RR_LT_H
:
5999 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6001 case OPC2_32_RR_LT_HU
:
6002 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6004 case OPC2_32_RR_LT_W
:
6005 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6007 case OPC2_32_RR_LT_WU
:
6008 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6010 case OPC2_32_RR_MAX
:
6011 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6012 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6014 case OPC2_32_RR_MAX_U
:
6015 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6016 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6018 case OPC2_32_RR_MAX_B
:
6019 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6021 case OPC2_32_RR_MAX_BU
:
6022 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6024 case OPC2_32_RR_MAX_H
:
6025 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6027 case OPC2_32_RR_MAX_HU
:
6028 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6030 case OPC2_32_RR_MIN
:
6031 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6032 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6034 case OPC2_32_RR_MIN_U
:
6035 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6036 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6038 case OPC2_32_RR_MIN_B
:
6039 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6041 case OPC2_32_RR_MIN_BU
:
6042 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6044 case OPC2_32_RR_MIN_H
:
6045 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6047 case OPC2_32_RR_MIN_HU
:
6048 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6050 case OPC2_32_RR_MOV
:
6051 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6054 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6057 case OPC2_32_RR_OR_EQ
:
6058 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6059 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6061 case OPC2_32_RR_OR_GE
:
6062 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6063 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6065 case OPC2_32_RR_OR_GE_U
:
6066 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6067 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6069 case OPC2_32_RR_OR_LT
:
6070 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6071 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6073 case OPC2_32_RR_OR_LT_U
:
6074 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6075 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6077 case OPC2_32_RR_OR_NE
:
6078 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6079 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
6081 case OPC2_32_RR_SAT_B
:
6082 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
6084 case OPC2_32_RR_SAT_BU
:
6085 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
6087 case OPC2_32_RR_SAT_H
:
6088 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
6090 case OPC2_32_RR_SAT_HU
:
6091 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
6093 case OPC2_32_RR_SH_EQ
:
6094 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6097 case OPC2_32_RR_SH_GE
:
6098 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6101 case OPC2_32_RR_SH_GE_U
:
6102 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6105 case OPC2_32_RR_SH_LT
:
6106 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6109 case OPC2_32_RR_SH_LT_U
:
6110 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6113 case OPC2_32_RR_SH_NE
:
6114 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6117 case OPC2_32_RR_SUB
:
6118 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6120 case OPC2_32_RR_SUB_B
:
6121 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6123 case OPC2_32_RR_SUB_H
:
6124 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6126 case OPC2_32_RR_SUBC
:
6127 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6129 case OPC2_32_RR_SUBS
:
6130 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6132 case OPC2_32_RR_SUBS_U
:
6133 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6135 case OPC2_32_RR_SUBS_H
:
6136 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6139 case OPC2_32_RR_SUBS_HU
:
6140 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6143 case OPC2_32_RR_SUBX
:
6144 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6146 case OPC2_32_RR_XOR_EQ
:
6147 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6148 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6150 case OPC2_32_RR_XOR_GE
:
6151 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6152 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6154 case OPC2_32_RR_XOR_GE_U
:
6155 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6156 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6158 case OPC2_32_RR_XOR_LT
:
6159 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6160 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6162 case OPC2_32_RR_XOR_LT_U
:
6163 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6164 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6166 case OPC2_32_RR_XOR_NE
:
6167 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6168 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
6173 static void decode_rr_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
6179 r3
= MASK_OP_RR_D(ctx
->opcode
);
6180 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6181 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6183 temp
= tcg_temp_new();
6184 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6187 case OPC2_32_RR_AND
:
6188 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6190 case OPC2_32_RR_ANDN
:
6191 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6193 case OPC2_32_RR_CLO
:
6194 gen_helper_clo(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6196 case OPC2_32_RR_CLO_H
:
6197 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6199 case OPC2_32_RR_CLS
:
6200 gen_helper_cls(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6202 case OPC2_32_RR_CLS_H
:
6203 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6205 case OPC2_32_RR_CLZ
:
6206 gen_helper_clz(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6208 case OPC2_32_RR_CLZ_H
:
6209 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6211 case OPC2_32_RR_NAND
:
6212 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6214 case OPC2_32_RR_NOR
:
6215 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6218 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6220 case OPC2_32_RR_ORN
:
6221 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6224 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6226 case OPC2_32_RR_SH_H
:
6227 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6229 case OPC2_32_RR_SHA
:
6230 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6232 case OPC2_32_RR_SHA_H
:
6233 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6235 case OPC2_32_RR_SHAS
:
6236 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6238 case OPC2_32_RR_XNOR
:
6239 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6241 case OPC2_32_RR_XOR
:
6242 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6245 tcg_temp_free(temp
);
6248 static void decode_rr_address(CPUTriCoreState
*env
, DisasContext
*ctx
)
6254 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6255 r3
= MASK_OP_RR_D(ctx
->opcode
);
6256 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6257 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6258 n
= MASK_OP_RR_N(ctx
->opcode
);
6261 case OPC2_32_RR_ADD_A
:
6262 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6264 case OPC2_32_RR_ADDSC_A
:
6265 temp
= tcg_temp_new();
6266 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
6267 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
6268 tcg_temp_free(temp
);
6270 case OPC2_32_RR_ADDSC_AT
:
6271 temp
= tcg_temp_new();
6272 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
6273 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
6274 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
6275 tcg_temp_free(temp
);
6277 case OPC2_32_RR_EQ_A
:
6278 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6281 case OPC2_32_RR_EQZ
:
6282 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6284 case OPC2_32_RR_GE_A
:
6285 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6288 case OPC2_32_RR_LT_A
:
6289 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6292 case OPC2_32_RR_MOV_A
:
6293 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
6295 case OPC2_32_RR_MOV_AA
:
6296 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
6298 case OPC2_32_RR_MOV_D
:
6299 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
6301 case OPC2_32_RR_NE_A
:
6302 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
6305 case OPC2_32_RR_NEZ_A
:
6306 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
6308 case OPC2_32_RR_SUB_A
:
6309 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
6314 static void decode_rr_idirect(CPUTriCoreState
*env
, DisasContext
*ctx
)
6319 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6320 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6324 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6326 case OPC2_32_RR_JLI
:
6327 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
6328 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6330 case OPC2_32_RR_CALLI
:
6331 gen_helper_1arg(call
, ctx
->next_pc
);
6332 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6334 case OPC2_32_RR_FCALLI
:
6335 gen_fcall_save_ctx(ctx
);
6336 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
6340 ctx
->bstate
= BS_BRANCH
;
6343 static void decode_rr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
6348 TCGv temp
, temp2
, temp3
;
6350 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
6351 r3
= MASK_OP_RR_D(ctx
->opcode
);
6352 r2
= MASK_OP_RR_S2(ctx
->opcode
);
6353 r1
= MASK_OP_RR_S1(ctx
->opcode
);
6356 case OPC2_32_RR_BMERGE
:
6357 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6359 case OPC2_32_RR_BSPLIT
:
6360 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6362 case OPC2_32_RR_DVINIT_B
:
6363 gen_dvinit_b(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6366 case OPC2_32_RR_DVINIT_BU
:
6367 temp
= tcg_temp_new();
6368 temp2
= tcg_temp_new();
6369 temp3
= tcg_temp_new();
6371 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 8);
6373 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6374 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
6375 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6376 tcg_gen_neg_tl(temp
, temp3
);
6377 /* use cpu_PSW_AV to compare against 0 */
6378 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, temp3
, cpu_PSW_AV
,
6380 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
6381 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
6382 temp2
, cpu_gpr_d
[r2
]);
6383 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6385 /* overflow = (D[b] == 0) */
6386 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6388 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6390 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6392 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
6393 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6395 tcg_temp_free(temp
);
6396 tcg_temp_free(temp2
);
6397 tcg_temp_free(temp3
);
6399 case OPC2_32_RR_DVINIT_H
:
6400 gen_dvinit_h(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6403 case OPC2_32_RR_DVINIT_HU
:
6404 temp
= tcg_temp_new();
6405 temp2
= tcg_temp_new();
6406 temp3
= tcg_temp_new();
6408 tcg_gen_shri_tl(temp3
, cpu_gpr_d
[r1
], 16);
6410 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6411 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
6412 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
6413 tcg_gen_neg_tl(temp
, temp3
);
6414 /* use cpu_PSW_AV to compare against 0 */
6415 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, temp3
, cpu_PSW_AV
,
6417 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
6418 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
6419 temp2
, cpu_gpr_d
[r2
]);
6420 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
6422 /* overflow = (D[b] == 0) */
6423 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6425 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6427 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6429 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp3
);
6430 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 16);
6431 tcg_temp_free(temp
);
6432 tcg_temp_free(temp2
);
6433 tcg_temp_free(temp3
);
6435 case OPC2_32_RR_DVINIT
:
6436 temp
= tcg_temp_new();
6437 temp2
= tcg_temp_new();
6438 /* overflow = ((D[b] == 0) ||
6439 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
6440 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
6441 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
6442 tcg_gen_and_tl(temp
, temp
, temp2
);
6443 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
6444 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
6445 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6447 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6449 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6451 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6452 /* sign extend to high reg */
6453 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
6454 tcg_temp_free(temp
);
6455 tcg_temp_free(temp2
);
6457 case OPC2_32_RR_DVINIT_U
:
6458 /* overflow = (D[b] == 0) */
6459 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
6460 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
6462 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
6464 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
6466 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6467 /* zero extend to high reg*/
6468 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
6470 case OPC2_32_RR_PARITY
:
6471 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6473 case OPC2_32_RR_UNPACK
:
6474 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6476 case OPC2_32_RR_CRC32
:
6477 if (tricore_feature(env
, TRICORE_FEATURE_161
)) {
6478 gen_helper_crc32(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6479 } /* TODO: else raise illegal opcode trap */
6485 static void decode_rr1_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
6493 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6494 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6495 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6496 n
= tcg_const_i32(MASK_OP_RR1_N(ctx
->opcode
));
6497 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6500 case OPC2_32_RR1_MUL_H_32_LL
:
6501 temp64
= tcg_temp_new_i64();
6502 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6503 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6504 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6505 tcg_temp_free_i64(temp64
);
6507 case OPC2_32_RR1_MUL_H_32_LU
:
6508 temp64
= tcg_temp_new_i64();
6509 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6510 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6511 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6512 tcg_temp_free_i64(temp64
);
6514 case OPC2_32_RR1_MUL_H_32_UL
:
6515 temp64
= tcg_temp_new_i64();
6516 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6517 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6518 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6519 tcg_temp_free_i64(temp64
);
6521 case OPC2_32_RR1_MUL_H_32_UU
:
6522 temp64
= tcg_temp_new_i64();
6523 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6524 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6525 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
6526 tcg_temp_free_i64(temp64
);
6528 case OPC2_32_RR1_MULM_H_64_LL
:
6529 temp64
= tcg_temp_new_i64();
6530 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6531 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6533 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6535 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6536 tcg_temp_free_i64(temp64
);
6538 case OPC2_32_RR1_MULM_H_64_LU
:
6539 temp64
= tcg_temp_new_i64();
6540 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6541 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6543 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6545 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6546 tcg_temp_free_i64(temp64
);
6548 case OPC2_32_RR1_MULM_H_64_UL
:
6549 temp64
= tcg_temp_new_i64();
6550 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6551 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6553 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6555 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6556 tcg_temp_free_i64(temp64
);
6558 case OPC2_32_RR1_MULM_H_64_UU
:
6559 temp64
= tcg_temp_new_i64();
6560 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6561 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
6563 tcg_gen_movi_tl(cpu_PSW_V
, 0);
6565 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
6566 tcg_temp_free_i64(temp64
);
6569 case OPC2_32_RR1_MULR_H_16_LL
:
6570 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6571 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6573 case OPC2_32_RR1_MULR_H_16_LU
:
6574 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6575 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6577 case OPC2_32_RR1_MULR_H_16_UL
:
6578 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6579 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6581 case OPC2_32_RR1_MULR_H_16_UU
:
6582 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
6583 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
6589 static void decode_rr1_mulq(CPUTriCoreState
*env
, DisasContext
*ctx
)
6597 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
6598 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
6599 r3
= MASK_OP_RR1_D(ctx
->opcode
);
6600 n
= MASK_OP_RR1_N(ctx
->opcode
);
6601 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
6603 temp
= tcg_temp_new();
6604 temp2
= tcg_temp_new();
6607 case OPC2_32_RR1_MUL_Q_32
:
6608 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
6610 case OPC2_32_RR1_MUL_Q_64
:
6611 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6614 case OPC2_32_RR1_MUL_Q_32_L
:
6615 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6616 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6618 case OPC2_32_RR1_MUL_Q_64_L
:
6619 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
6620 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6622 case OPC2_32_RR1_MUL_Q_32_U
:
6623 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6624 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
6626 case OPC2_32_RR1_MUL_Q_64_U
:
6627 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
6628 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
6630 case OPC2_32_RR1_MUL_Q_32_LL
:
6631 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6632 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6633 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6635 case OPC2_32_RR1_MUL_Q_32_UU
:
6636 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6637 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6638 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6640 case OPC2_32_RR1_MULR_Q_32_L
:
6641 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
6642 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
6643 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6645 case OPC2_32_RR1_MULR_Q_32_U
:
6646 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
6647 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
6648 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
6651 tcg_temp_free(temp
);
6652 tcg_temp_free(temp2
);
6656 static void decode_rr2_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
6661 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
6662 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
6663 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
6664 r3
= MASK_OP_RR2_D(ctx
->opcode
);
6666 case OPC2_32_RR2_MUL_32
:
6667 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6669 case OPC2_32_RR2_MUL_64
:
6670 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6673 case OPC2_32_RR2_MULS_32
:
6674 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6677 case OPC2_32_RR2_MUL_U_64
:
6678 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
6681 case OPC2_32_RR2_MULS_U_32
:
6682 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
6689 static void decode_rrpw_extract_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
6695 op2
= MASK_OP_RRPW_OP2(ctx
->opcode
);
6696 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
6697 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
6698 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
6699 pos
= MASK_OP_RRPW_POS(ctx
->opcode
);
6700 width
= MASK_OP_RRPW_WIDTH(ctx
->opcode
);
6703 case OPC2_32_RRPW_EXTR
:
6704 if (pos
+ width
<= 31) {
6705 /* optimize special cases */
6706 if ((pos
== 0) && (width
== 8)) {
6707 tcg_gen_ext8s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6708 } else if ((pos
== 0) && (width
== 16)) {
6709 tcg_gen_ext16s_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
6711 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 32 - pos
- width
);
6712 tcg_gen_sari_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 32 - width
);
6716 case OPC2_32_RRPW_EXTR_U
:
6718 tcg_gen_movi_tl(cpu_gpr_d
[r3
], 0);
6720 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], pos
);
6721 tcg_gen_andi_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], ~0u >> (32-width
));
6724 case OPC2_32_RRPW_IMASK
:
6725 if (pos
+ width
<= 31) {
6726 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], ((1u << width
) - 1) << pos
);
6727 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], pos
);
6730 case OPC2_32_RRPW_INSERT
:
6731 if (pos
+ width
<= 31) {
6732 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6740 static void decode_rrr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
6746 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6747 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6748 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6749 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6750 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6753 case OPC2_32_RRR_CADD
:
6754 gen_cond_add(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
6755 cpu_gpr_d
[r4
], cpu_gpr_d
[r3
]);
6757 case OPC2_32_RRR_CADDN
:
6758 gen_cond_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6761 case OPC2_32_RRR_CSUB
:
6762 gen_cond_sub(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6765 case OPC2_32_RRR_CSUBN
:
6766 gen_cond_sub(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], cpu_gpr_d
[r4
],
6769 case OPC2_32_RRR_SEL
:
6770 temp
= tcg_const_i32(0);
6771 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6772 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6773 tcg_temp_free(temp
);
6775 case OPC2_32_RRR_SELN
:
6776 temp
= tcg_const_i32(0);
6777 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
6778 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
6779 tcg_temp_free(temp
);
6784 static void decode_rrr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
6790 op2
= MASK_OP_RRR_OP2(ctx
->opcode
);
6791 r1
= MASK_OP_RRR_S1(ctx
->opcode
);
6792 r2
= MASK_OP_RRR_S2(ctx
->opcode
);
6793 r3
= MASK_OP_RRR_S3(ctx
->opcode
);
6794 r4
= MASK_OP_RRR_D(ctx
->opcode
);
6797 case OPC2_32_RRR_DVADJ
:
6798 GEN_HELPER_RRR(dvadj
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6799 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6801 case OPC2_32_RRR_DVSTEP
:
6802 GEN_HELPER_RRR(dvstep
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6803 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6805 case OPC2_32_RRR_DVSTEP_U
:
6806 GEN_HELPER_RRR(dvstep_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6807 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6809 case OPC2_32_RRR_IXMAX
:
6810 GEN_HELPER_RRR(ixmax
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6811 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6813 case OPC2_32_RRR_IXMAX_U
:
6814 GEN_HELPER_RRR(ixmax_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6815 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6817 case OPC2_32_RRR_IXMIN
:
6818 GEN_HELPER_RRR(ixmin
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6819 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6821 case OPC2_32_RRR_IXMIN_U
:
6822 GEN_HELPER_RRR(ixmin_u
, cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6823 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6825 case OPC2_32_RRR_PACK
:
6826 gen_helper_pack(cpu_gpr_d
[r4
], cpu_PSW_C
, cpu_gpr_d
[r3
],
6827 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
6833 static void decode_rrr2_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
6836 uint32_t r1
, r2
, r3
, r4
;
6838 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6839 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6840 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6841 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6842 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6844 case OPC2_32_RRR2_MADD_32
:
6845 gen_madd32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6848 case OPC2_32_RRR2_MADD_64
:
6849 gen_madd64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6850 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6852 case OPC2_32_RRR2_MADDS_32
:
6853 gen_helper_madd32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6854 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6856 case OPC2_32_RRR2_MADDS_64
:
6857 gen_madds_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6858 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6860 case OPC2_32_RRR2_MADD_U_64
:
6861 gen_maddu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6862 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6864 case OPC2_32_RRR2_MADDS_U_32
:
6865 gen_helper_madd32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6866 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6868 case OPC2_32_RRR2_MADDS_U_64
:
6869 gen_maddsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6870 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6875 static void decode_rrr2_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
6878 uint32_t r1
, r2
, r3
, r4
;
6880 op2
= MASK_OP_RRR2_OP2(ctx
->opcode
);
6881 r1
= MASK_OP_RRR2_S1(ctx
->opcode
);
6882 r2
= MASK_OP_RRR2_S2(ctx
->opcode
);
6883 r3
= MASK_OP_RRR2_S3(ctx
->opcode
);
6884 r4
= MASK_OP_RRR2_D(ctx
->opcode
);
6887 case OPC2_32_RRR2_MSUB_32
:
6888 gen_msub32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
],
6891 case OPC2_32_RRR2_MSUB_64
:
6892 gen_msub64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6893 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6895 case OPC2_32_RRR2_MSUBS_32
:
6896 gen_helper_msub32_ssov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6897 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6899 case OPC2_32_RRR2_MSUBS_64
:
6900 gen_msubs_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6901 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6903 case OPC2_32_RRR2_MSUB_U_64
:
6904 gen_msubu64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6905 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6907 case OPC2_32_RRR2_MSUBS_U_32
:
6908 gen_helper_msub32_suov(cpu_gpr_d
[r4
], cpu_env
, cpu_gpr_d
[r1
],
6909 cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
6911 case OPC2_32_RRR2_MSUBS_U_64
:
6912 gen_msubsu_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
6913 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r2
]);
6919 static void decode_rrr1_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
6922 uint32_t r1
, r2
, r3
, r4
, n
;
6924 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
6925 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
6926 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
6927 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
6928 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
6929 n
= MASK_OP_RRR1_N(ctx
->opcode
);
6932 case OPC2_32_RRR1_MADD_H_LL
:
6933 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6934 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6936 case OPC2_32_RRR1_MADD_H_LU
:
6937 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6938 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6940 case OPC2_32_RRR1_MADD_H_UL
:
6941 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6942 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6944 case OPC2_32_RRR1_MADD_H_UU
:
6945 gen_madd_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6946 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6948 case OPC2_32_RRR1_MADDS_H_LL
:
6949 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6950 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6952 case OPC2_32_RRR1_MADDS_H_LU
:
6953 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6954 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6956 case OPC2_32_RRR1_MADDS_H_UL
:
6957 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6958 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6960 case OPC2_32_RRR1_MADDS_H_UU
:
6961 gen_madds_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6962 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6964 case OPC2_32_RRR1_MADDM_H_LL
:
6965 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6966 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6968 case OPC2_32_RRR1_MADDM_H_LU
:
6969 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6970 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6972 case OPC2_32_RRR1_MADDM_H_UL
:
6973 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6974 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6976 case OPC2_32_RRR1_MADDM_H_UU
:
6977 gen_maddm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6978 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6980 case OPC2_32_RRR1_MADDMS_H_LL
:
6981 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6982 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
6984 case OPC2_32_RRR1_MADDMS_H_LU
:
6985 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6986 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
6988 case OPC2_32_RRR1_MADDMS_H_UL
:
6989 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6990 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
6992 case OPC2_32_RRR1_MADDMS_H_UU
:
6993 gen_maddms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
6994 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
6996 case OPC2_32_RRR1_MADDR_H_LL
:
6997 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
6998 cpu_gpr_d
[r2
], n
, MODE_LL
);
7000 case OPC2_32_RRR1_MADDR_H_LU
:
7001 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7002 cpu_gpr_d
[r2
], n
, MODE_LU
);
7004 case OPC2_32_RRR1_MADDR_H_UL
:
7005 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7006 cpu_gpr_d
[r2
], n
, MODE_UL
);
7008 case OPC2_32_RRR1_MADDR_H_UU
:
7009 gen_maddr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7010 cpu_gpr_d
[r2
], n
, MODE_UU
);
7012 case OPC2_32_RRR1_MADDRS_H_LL
:
7013 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7014 cpu_gpr_d
[r2
], n
, MODE_LL
);
7016 case OPC2_32_RRR1_MADDRS_H_LU
:
7017 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7018 cpu_gpr_d
[r2
], n
, MODE_LU
);
7020 case OPC2_32_RRR1_MADDRS_H_UL
:
7021 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7022 cpu_gpr_d
[r2
], n
, MODE_UL
);
7024 case OPC2_32_RRR1_MADDRS_H_UU
:
7025 gen_maddr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7026 cpu_gpr_d
[r2
], n
, MODE_UU
);
7031 static void decode_rrr1_maddq_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7034 uint32_t r1
, r2
, r3
, r4
, n
;
7037 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7038 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7039 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7040 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7041 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7042 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7044 temp
= tcg_const_i32(n
);
7045 temp2
= tcg_temp_new();
7048 case OPC2_32_RRR1_MADD_Q_32
:
7049 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7050 cpu_gpr_d
[r2
], n
, 32, env
);
7052 case OPC2_32_RRR1_MADD_Q_64
:
7053 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7054 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7057 case OPC2_32_RRR1_MADD_Q_32_L
:
7058 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7059 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7062 case OPC2_32_RRR1_MADD_Q_64_L
:
7063 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7064 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7065 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7068 case OPC2_32_RRR1_MADD_Q_32_U
:
7069 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7070 gen_madd32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7073 case OPC2_32_RRR1_MADD_Q_64_U
:
7074 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7075 gen_madd64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7076 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7079 case OPC2_32_RRR1_MADD_Q_32_LL
:
7080 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7081 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7082 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7084 case OPC2_32_RRR1_MADD_Q_64_LL
:
7085 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7086 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7087 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7088 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7090 case OPC2_32_RRR1_MADD_Q_32_UU
:
7091 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7092 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7093 gen_m16add32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7095 case OPC2_32_RRR1_MADD_Q_64_UU
:
7096 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7097 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7098 gen_m16add64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7099 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7101 case OPC2_32_RRR1_MADDS_Q_32
:
7102 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7103 cpu_gpr_d
[r2
], n
, 32);
7105 case OPC2_32_RRR1_MADDS_Q_64
:
7106 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7107 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7110 case OPC2_32_RRR1_MADDS_Q_32_L
:
7111 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7112 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7115 case OPC2_32_RRR1_MADDS_Q_64_L
:
7116 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7117 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7118 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7121 case OPC2_32_RRR1_MADDS_Q_32_U
:
7122 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7123 gen_madds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7126 case OPC2_32_RRR1_MADDS_Q_64_U
:
7127 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7128 gen_madds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7129 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7132 case OPC2_32_RRR1_MADDS_Q_32_LL
:
7133 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7134 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7135 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7137 case OPC2_32_RRR1_MADDS_Q_64_LL
:
7138 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7139 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7140 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7141 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7143 case OPC2_32_RRR1_MADDS_Q_32_UU
:
7144 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7145 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7146 gen_m16adds32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7148 case OPC2_32_RRR1_MADDS_Q_64_UU
:
7149 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7150 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7151 gen_m16adds64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7152 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7154 case OPC2_32_RRR1_MADDR_H_64_UL
:
7155 gen_maddr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7156 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7158 case OPC2_32_RRR1_MADDRS_H_64_UL
:
7159 gen_maddr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7160 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7162 case OPC2_32_RRR1_MADDR_Q_32_LL
:
7163 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7164 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7165 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7167 case OPC2_32_RRR1_MADDR_Q_32_UU
:
7168 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7169 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7170 gen_maddr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7172 case OPC2_32_RRR1_MADDRS_Q_32_LL
:
7173 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7174 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7175 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7177 case OPC2_32_RRR1_MADDRS_Q_32_UU
:
7178 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7179 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7180 gen_maddrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7183 tcg_temp_free(temp
);
7184 tcg_temp_free(temp2
);
7187 static void decode_rrr1_maddsu_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7190 uint32_t r1
, r2
, r3
, r4
, n
;
7192 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7193 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7194 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7195 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7196 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7197 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7200 case OPC2_32_RRR1_MADDSU_H_32_LL
:
7201 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7202 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7204 case OPC2_32_RRR1_MADDSU_H_32_LU
:
7205 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7206 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7208 case OPC2_32_RRR1_MADDSU_H_32_UL
:
7209 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7210 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7212 case OPC2_32_RRR1_MADDSU_H_32_UU
:
7213 gen_maddsu_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7214 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7216 case OPC2_32_RRR1_MADDSUS_H_32_LL
:
7217 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7218 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7221 case OPC2_32_RRR1_MADDSUS_H_32_LU
:
7222 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7223 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7226 case OPC2_32_RRR1_MADDSUS_H_32_UL
:
7227 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7228 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7231 case OPC2_32_RRR1_MADDSUS_H_32_UU
:
7232 gen_maddsus_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7233 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7236 case OPC2_32_RRR1_MADDSUM_H_64_LL
:
7237 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7238 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7241 case OPC2_32_RRR1_MADDSUM_H_64_LU
:
7242 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7243 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7246 case OPC2_32_RRR1_MADDSUM_H_64_UL
:
7247 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7248 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7251 case OPC2_32_RRR1_MADDSUM_H_64_UU
:
7252 gen_maddsum_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7253 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7256 case OPC2_32_RRR1_MADDSUMS_H_64_LL
:
7257 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7258 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7261 case OPC2_32_RRR1_MADDSUMS_H_64_LU
:
7262 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7263 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7266 case OPC2_32_RRR1_MADDSUMS_H_64_UL
:
7267 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7268 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7271 case OPC2_32_RRR1_MADDSUMS_H_64_UU
:
7272 gen_maddsums_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7273 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7276 case OPC2_32_RRR1_MADDSUR_H_16_LL
:
7277 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7278 cpu_gpr_d
[r2
], n
, MODE_LL
);
7280 case OPC2_32_RRR1_MADDSUR_H_16_LU
:
7281 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7282 cpu_gpr_d
[r2
], n
, MODE_LU
);
7284 case OPC2_32_RRR1_MADDSUR_H_16_UL
:
7285 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7286 cpu_gpr_d
[r2
], n
, MODE_UL
);
7288 case OPC2_32_RRR1_MADDSUR_H_16_UU
:
7289 gen_maddsur32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7290 cpu_gpr_d
[r2
], n
, MODE_UU
);
7292 case OPC2_32_RRR1_MADDSURS_H_16_LL
:
7293 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7294 cpu_gpr_d
[r2
], n
, MODE_LL
);
7296 case OPC2_32_RRR1_MADDSURS_H_16_LU
:
7297 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7298 cpu_gpr_d
[r2
], n
, MODE_LU
);
7300 case OPC2_32_RRR1_MADDSURS_H_16_UL
:
7301 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7302 cpu_gpr_d
[r2
], n
, MODE_UL
);
7304 case OPC2_32_RRR1_MADDSURS_H_16_UU
:
7305 gen_maddsur32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7306 cpu_gpr_d
[r2
], n
, MODE_UU
);
7311 static void decode_rrr1_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
7314 uint32_t r1
, r2
, r3
, r4
, n
;
7316 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7317 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7318 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7319 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7320 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7321 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7324 case OPC2_32_RRR1_MSUB_H_LL
:
7325 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7326 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7328 case OPC2_32_RRR1_MSUB_H_LU
:
7329 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7330 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7332 case OPC2_32_RRR1_MSUB_H_UL
:
7333 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7334 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7336 case OPC2_32_RRR1_MSUB_H_UU
:
7337 gen_msub_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7338 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7340 case OPC2_32_RRR1_MSUBS_H_LL
:
7341 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7342 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7344 case OPC2_32_RRR1_MSUBS_H_LU
:
7345 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7346 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7348 case OPC2_32_RRR1_MSUBS_H_UL
:
7349 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7350 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7352 case OPC2_32_RRR1_MSUBS_H_UU
:
7353 gen_msubs_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7354 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7356 case OPC2_32_RRR1_MSUBM_H_LL
:
7357 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7358 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7360 case OPC2_32_RRR1_MSUBM_H_LU
:
7361 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7362 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7364 case OPC2_32_RRR1_MSUBM_H_UL
:
7365 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7366 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7368 case OPC2_32_RRR1_MSUBM_H_UU
:
7369 gen_msubm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7370 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7372 case OPC2_32_RRR1_MSUBMS_H_LL
:
7373 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7374 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7376 case OPC2_32_RRR1_MSUBMS_H_LU
:
7377 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7378 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7380 case OPC2_32_RRR1_MSUBMS_H_UL
:
7381 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7382 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7384 case OPC2_32_RRR1_MSUBMS_H_UU
:
7385 gen_msubms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7386 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7388 case OPC2_32_RRR1_MSUBR_H_LL
:
7389 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7390 cpu_gpr_d
[r2
], n
, MODE_LL
);
7392 case OPC2_32_RRR1_MSUBR_H_LU
:
7393 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7394 cpu_gpr_d
[r2
], n
, MODE_LU
);
7396 case OPC2_32_RRR1_MSUBR_H_UL
:
7397 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7398 cpu_gpr_d
[r2
], n
, MODE_UL
);
7400 case OPC2_32_RRR1_MSUBR_H_UU
:
7401 gen_msubr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7402 cpu_gpr_d
[r2
], n
, MODE_UU
);
7404 case OPC2_32_RRR1_MSUBRS_H_LL
:
7405 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7406 cpu_gpr_d
[r2
], n
, MODE_LL
);
7408 case OPC2_32_RRR1_MSUBRS_H_LU
:
7409 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7410 cpu_gpr_d
[r2
], n
, MODE_LU
);
7412 case OPC2_32_RRR1_MSUBRS_H_UL
:
7413 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7414 cpu_gpr_d
[r2
], n
, MODE_UL
);
7416 case OPC2_32_RRR1_MSUBRS_H_UU
:
7417 gen_msubr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7418 cpu_gpr_d
[r2
], n
, MODE_UU
);
7423 static void decode_rrr1_msubq_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7426 uint32_t r1
, r2
, r3
, r4
, n
;
7429 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7430 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7431 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7432 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7433 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7434 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7436 temp
= tcg_const_i32(n
);
7437 temp2
= tcg_temp_new();
7440 case OPC2_32_RRR1_MSUB_Q_32
:
7441 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7442 cpu_gpr_d
[r2
], n
, 32, env
);
7444 case OPC2_32_RRR1_MSUB_Q_64
:
7445 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7446 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7449 case OPC2_32_RRR1_MSUB_Q_32_L
:
7450 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7451 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7454 case OPC2_32_RRR1_MSUB_Q_64_L
:
7455 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7456 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7457 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7460 case OPC2_32_RRR1_MSUB_Q_32_U
:
7461 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7462 gen_msub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7465 case OPC2_32_RRR1_MSUB_Q_64_U
:
7466 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7467 gen_msub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7468 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7471 case OPC2_32_RRR1_MSUB_Q_32_LL
:
7472 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7473 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7474 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7476 case OPC2_32_RRR1_MSUB_Q_64_LL
:
7477 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7478 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7479 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7480 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7482 case OPC2_32_RRR1_MSUB_Q_32_UU
:
7483 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7484 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7485 gen_m16sub32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7487 case OPC2_32_RRR1_MSUB_Q_64_UU
:
7488 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7489 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7490 gen_m16sub64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7491 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7493 case OPC2_32_RRR1_MSUBS_Q_32
:
7494 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7495 cpu_gpr_d
[r2
], n
, 32);
7497 case OPC2_32_RRR1_MSUBS_Q_64
:
7498 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7499 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7502 case OPC2_32_RRR1_MSUBS_Q_32_L
:
7503 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7504 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7507 case OPC2_32_RRR1_MSUBS_Q_64_L
:
7508 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
7509 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7510 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7513 case OPC2_32_RRR1_MSUBS_Q_32_U
:
7514 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7515 gen_msubs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7518 case OPC2_32_RRR1_MSUBS_Q_64_U
:
7519 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
7520 gen_msubs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7521 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
,
7524 case OPC2_32_RRR1_MSUBS_Q_32_LL
:
7525 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7526 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7527 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7529 case OPC2_32_RRR1_MSUBS_Q_64_LL
:
7530 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7531 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7532 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7533 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7535 case OPC2_32_RRR1_MSUBS_Q_32_UU
:
7536 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7537 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7538 gen_m16subs32_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7540 case OPC2_32_RRR1_MSUBS_Q_64_UU
:
7541 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7542 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7543 gen_m16subs64_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7544 cpu_gpr_d
[r3
+1], temp
, temp2
, n
);
7546 case OPC2_32_RRR1_MSUBR_H_64_UL
:
7547 gen_msubr64_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7548 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7550 case OPC2_32_RRR1_MSUBRS_H_64_UL
:
7551 gen_msubr64s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1],
7552 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 2);
7554 case OPC2_32_RRR1_MSUBR_Q_32_LL
:
7555 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7556 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7557 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7559 case OPC2_32_RRR1_MSUBR_Q_32_UU
:
7560 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7561 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7562 gen_msubr_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7564 case OPC2_32_RRR1_MSUBRS_Q_32_LL
:
7565 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
7566 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
7567 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7569 case OPC2_32_RRR1_MSUBRS_Q_32_UU
:
7570 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
7571 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
7572 gen_msubrs_q(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
, temp2
, n
);
7575 tcg_temp_free(temp
);
7576 tcg_temp_free(temp2
);
7579 static void decode_rrr1_msubad_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
7582 uint32_t r1
, r2
, r3
, r4
, n
;
7584 op2
= MASK_OP_RRR1_OP2(ctx
->opcode
);
7585 r1
= MASK_OP_RRR1_S1(ctx
->opcode
);
7586 r2
= MASK_OP_RRR1_S2(ctx
->opcode
);
7587 r3
= MASK_OP_RRR1_S3(ctx
->opcode
);
7588 r4
= MASK_OP_RRR1_D(ctx
->opcode
);
7589 n
= MASK_OP_RRR1_N(ctx
->opcode
);
7592 case OPC2_32_RRR1_MSUBAD_H_32_LL
:
7593 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7594 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LL
);
7596 case OPC2_32_RRR1_MSUBAD_H_32_LU
:
7597 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7598 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_LU
);
7600 case OPC2_32_RRR1_MSUBAD_H_32_UL
:
7601 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7602 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UL
);
7604 case OPC2_32_RRR1_MSUBAD_H_32_UU
:
7605 gen_msubad_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7606 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, MODE_UU
);
7608 case OPC2_32_RRR1_MSUBADS_H_32_LL
:
7609 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7610 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7613 case OPC2_32_RRR1_MSUBADS_H_32_LU
:
7614 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7615 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7618 case OPC2_32_RRR1_MSUBADS_H_32_UL
:
7619 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7620 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7623 case OPC2_32_RRR1_MSUBADS_H_32_UU
:
7624 gen_msubads_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7625 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7628 case OPC2_32_RRR1_MSUBADM_H_64_LL
:
7629 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7630 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7633 case OPC2_32_RRR1_MSUBADM_H_64_LU
:
7634 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7635 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7638 case OPC2_32_RRR1_MSUBADM_H_64_UL
:
7639 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7640 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7643 case OPC2_32_RRR1_MSUBADM_H_64_UU
:
7644 gen_msubadm_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7645 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7648 case OPC2_32_RRR1_MSUBADMS_H_64_LL
:
7649 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7650 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7653 case OPC2_32_RRR1_MSUBADMS_H_64_LU
:
7654 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7655 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7658 case OPC2_32_RRR1_MSUBADMS_H_64_UL
:
7659 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7660 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7663 case OPC2_32_RRR1_MSUBADMS_H_64_UU
:
7664 gen_msubadms_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r3
],
7665 cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
7668 case OPC2_32_RRR1_MSUBADR_H_16_LL
:
7669 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7670 cpu_gpr_d
[r2
], n
, MODE_LL
);
7672 case OPC2_32_RRR1_MSUBADR_H_16_LU
:
7673 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7674 cpu_gpr_d
[r2
], n
, MODE_LU
);
7676 case OPC2_32_RRR1_MSUBADR_H_16_UL
:
7677 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7678 cpu_gpr_d
[r2
], n
, MODE_UL
);
7680 case OPC2_32_RRR1_MSUBADR_H_16_UU
:
7681 gen_msubadr32_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7682 cpu_gpr_d
[r2
], n
, MODE_UU
);
7684 case OPC2_32_RRR1_MSUBADRS_H_16_LL
:
7685 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7686 cpu_gpr_d
[r2
], n
, MODE_LL
);
7688 case OPC2_32_RRR1_MSUBADRS_H_16_LU
:
7689 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7690 cpu_gpr_d
[r2
], n
, MODE_LU
);
7692 case OPC2_32_RRR1_MSUBADRS_H_16_UL
:
7693 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7694 cpu_gpr_d
[r2
], n
, MODE_UL
);
7696 case OPC2_32_RRR1_MSUBADRS_H_16_UU
:
7697 gen_msubadr32s_h(cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
7698 cpu_gpr_d
[r2
], n
, MODE_UU
);
7704 static void decode_rrrr_extract_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
7708 TCGv tmp_width
, tmp_pos
;
7710 r1
= MASK_OP_RRRR_S1(ctx
->opcode
);
7711 r2
= MASK_OP_RRRR_S2(ctx
->opcode
);
7712 r3
= MASK_OP_RRRR_S3(ctx
->opcode
);
7713 r4
= MASK_OP_RRRR_D(ctx
->opcode
);
7714 op2
= MASK_OP_RRRR_OP2(ctx
->opcode
);
7716 tmp_pos
= tcg_temp_new();
7717 tmp_width
= tcg_temp_new();
7720 case OPC2_32_RRRR_DEXTR
:
7721 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7723 tcg_gen_rotl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7725 tcg_gen_shl_tl(tmp_width
, cpu_gpr_d
[r1
], tmp_pos
);
7726 tcg_gen_subfi_tl(tmp_pos
, 32, tmp_pos
);
7727 tcg_gen_shr_tl(tmp_pos
, cpu_gpr_d
[r2
], tmp_pos
);
7728 tcg_gen_or_tl(cpu_gpr_d
[r4
], tmp_width
, tmp_pos
);
7731 case OPC2_32_RRRR_EXTR
:
7732 case OPC2_32_RRRR_EXTR_U
:
7733 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7734 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7735 tcg_gen_add_tl(tmp_pos
, tmp_pos
, tmp_width
);
7736 tcg_gen_subfi_tl(tmp_pos
, 32, tmp_pos
);
7737 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], tmp_pos
);
7738 tcg_gen_subfi_tl(tmp_width
, 32, tmp_width
);
7739 if (op2
== OPC2_32_RRRR_EXTR
) {
7740 tcg_gen_sar_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7742 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], tmp_width
);
7745 case OPC2_32_RRRR_INSERT
:
7746 tcg_gen_andi_tl(tmp_width
, cpu_gpr_d
[r3
+1], 0x1f);
7747 tcg_gen_andi_tl(tmp_pos
, cpu_gpr_d
[r3
], 0x1f);
7748 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], tmp_width
,
7752 tcg_temp_free(tmp_pos
);
7753 tcg_temp_free(tmp_width
);
7757 static void decode_rrrw_extract_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
7765 op2
= MASK_OP_RRRW_OP2(ctx
->opcode
);
7766 r1
= MASK_OP_RRRW_S1(ctx
->opcode
);
7767 r2
= MASK_OP_RRRW_S2(ctx
->opcode
);
7768 r3
= MASK_OP_RRRW_S3(ctx
->opcode
);
7769 r4
= MASK_OP_RRRW_D(ctx
->opcode
);
7770 width
= MASK_OP_RRRW_WIDTH(ctx
->opcode
);
7772 temp
= tcg_temp_new();
7775 case OPC2_32_RRRW_EXTR
:
7776 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7777 tcg_gen_addi_tl(temp
, temp
, width
);
7778 tcg_gen_subfi_tl(temp
, 32, temp
);
7779 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7780 tcg_gen_sari_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], 32 - width
);
7782 case OPC2_32_RRRW_EXTR_U
:
7784 tcg_gen_movi_tl(cpu_gpr_d
[r4
], 0);
7786 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7787 tcg_gen_shr_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], temp
);
7788 tcg_gen_andi_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
], ~0u >> (32-width
));
7791 case OPC2_32_RRRW_IMASK
:
7792 temp2
= tcg_temp_new();
7794 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r3
], 0x1f);
7795 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
7796 tcg_gen_shl_tl(temp2
, temp2
, temp
);
7797 tcg_gen_shl_tl(cpu_gpr_d
[r4
], cpu_gpr_d
[r2
], temp
);
7798 tcg_gen_mov_tl(cpu_gpr_d
[r4
+1], temp2
);
7800 tcg_temp_free(temp2
);
7802 case OPC2_32_RRRW_INSERT
:
7803 temp2
= tcg_temp_new();
7805 tcg_gen_movi_tl(temp
, width
);
7806 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
], 0x1f);
7807 gen_insert(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], temp
, temp2
);
7809 tcg_temp_free(temp2
);
7812 tcg_temp_free(temp
);
7816 static void decode_sys_interrupts(CPUTriCoreState
*env
, DisasContext
*ctx
)
7823 op2
= MASK_OP_SYS_OP2(ctx
->opcode
);
7824 r1
= MASK_OP_SYS_S1D(ctx
->opcode
);
7827 case OPC2_32_SYS_DEBUG
:
7828 /* raise EXCP_DEBUG */
7830 case OPC2_32_SYS_DISABLE
:
7831 tcg_gen_andi_tl(cpu_ICR
, cpu_ICR
, ~MASK_ICR_IE
);
7833 case OPC2_32_SYS_DSYNC
:
7835 case OPC2_32_SYS_ENABLE
:
7836 tcg_gen_ori_tl(cpu_ICR
, cpu_ICR
, MASK_ICR_IE
);
7838 case OPC2_32_SYS_ISYNC
:
7840 case OPC2_32_SYS_NOP
:
7842 case OPC2_32_SYS_RET
:
7843 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
7845 case OPC2_32_SYS_RFE
:
7846 gen_helper_rfe(cpu_env
);
7848 ctx
->bstate
= BS_BRANCH
;
7850 case OPC2_32_SYS_RFM
:
7851 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
7852 tmp
= tcg_temp_new();
7853 l1
= gen_new_label();
7855 tcg_gen_ld32u_tl(tmp
, cpu_env
, offsetof(CPUTriCoreState
, DBGSR
));
7856 tcg_gen_andi_tl(tmp
, tmp
, MASK_DBGSR_DE
);
7857 tcg_gen_brcondi_tl(TCG_COND_NE
, tmp
, 1, l1
);
7858 gen_helper_rfm(cpu_env
);
7861 ctx
->bstate
= BS_BRANCH
;
7864 /* generate privilege trap */
7867 case OPC2_32_SYS_RSLCX
:
7868 gen_helper_rslcx(cpu_env
);
7870 case OPC2_32_SYS_SVLCX
:
7871 gen_helper_svlcx(cpu_env
);
7873 case OPC2_32_SYS_RESTORE
:
7874 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
7875 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
||
7876 (ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_UM1
) {
7877 tcg_gen_deposit_tl(cpu_ICR
, cpu_ICR
, cpu_gpr_d
[r1
], 8, 1);
7878 } /* else raise privilege trap */
7879 } /* else raise illegal opcode trap */
7881 case OPC2_32_SYS_TRAPSV
:
7882 /* TODO: raise sticky overflow trap */
7884 case OPC2_32_SYS_TRAPV
:
7885 /* TODO: raise overflow trap */
7890 static void decode_32Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
7894 int32_t address
, const16
;
7897 TCGv temp
, temp2
, temp3
;
7899 op1
= MASK_OP_MAJOR(ctx
->opcode
);
7901 /* handle JNZ.T opcode only being 7 bit long */
7902 if (unlikely((op1
& 0x7f) == OPCM_32_BRN_JTT
)) {
7903 op1
= OPCM_32_BRN_JTT
;
7908 case OPCM_32_ABS_LDW
:
7909 decode_abs_ldw(env
, ctx
);
7911 case OPCM_32_ABS_LDB
:
7912 decode_abs_ldb(env
, ctx
);
7914 case OPCM_32_ABS_LDMST_SWAP
:
7915 decode_abs_ldst_swap(env
, ctx
);
7917 case OPCM_32_ABS_LDST_CONTEXT
:
7918 decode_abs_ldst_context(env
, ctx
);
7920 case OPCM_32_ABS_STORE
:
7921 decode_abs_store(env
, ctx
);
7923 case OPCM_32_ABS_STOREB_H
:
7924 decode_abs_storeb_h(env
, ctx
);
7926 case OPC1_32_ABS_STOREQ
:
7927 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7928 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
7929 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
7930 temp2
= tcg_temp_new();
7932 tcg_gen_shri_tl(temp2
, cpu_gpr_d
[r1
], 16);
7933 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_LEUW
);
7935 tcg_temp_free(temp2
);
7936 tcg_temp_free(temp
);
7938 case OPC1_32_ABS_LD_Q
:
7939 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7940 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
7941 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
7943 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
7944 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
7946 tcg_temp_free(temp
);
7948 case OPC1_32_ABS_LEA
:
7949 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7950 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
7951 tcg_gen_movi_tl(cpu_gpr_a
[r1
], EA_ABS_FORMAT(address
));
7954 case OPC1_32_ABSB_ST_T
:
7955 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
7956 b
= MASK_OP_ABSB_B(ctx
->opcode
);
7957 bpos
= MASK_OP_ABSB_BPOS(ctx
->opcode
);
7959 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
7960 temp2
= tcg_temp_new();
7962 tcg_gen_qemu_ld_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
7963 tcg_gen_andi_tl(temp2
, temp2
, ~(0x1u
<< bpos
));
7964 tcg_gen_ori_tl(temp2
, temp2
, (b
<< bpos
));
7965 tcg_gen_qemu_st_tl(temp2
, temp
, ctx
->mem_idx
, MO_UB
);
7967 tcg_temp_free(temp
);
7968 tcg_temp_free(temp2
);
7971 case OPC1_32_B_CALL
:
7972 case OPC1_32_B_CALLA
:
7973 case OPC1_32_B_FCALL
:
7974 case OPC1_32_B_FCALLA
:
7979 address
= MASK_OP_B_DISP24_SEXT(ctx
->opcode
);
7980 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
7983 case OPCM_32_BIT_ANDACC
:
7984 decode_bit_andacc(env
, ctx
);
7986 case OPCM_32_BIT_LOGICAL_T1
:
7987 decode_bit_logical_t(env
, ctx
);
7989 case OPCM_32_BIT_INSERT
:
7990 decode_bit_insert(env
, ctx
);
7992 case OPCM_32_BIT_LOGICAL_T2
:
7993 decode_bit_logical_t2(env
, ctx
);
7995 case OPCM_32_BIT_ORAND
:
7996 decode_bit_orand(env
, ctx
);
7998 case OPCM_32_BIT_SH_LOGIC1
:
7999 decode_bit_sh_logic1(env
, ctx
);
8001 case OPCM_32_BIT_SH_LOGIC2
:
8002 decode_bit_sh_logic2(env
, ctx
);
8005 case OPCM_32_BO_ADDRMODE_POST_PRE_BASE
:
8006 decode_bo_addrmode_post_pre_base(env
, ctx
);
8008 case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR
:
8009 decode_bo_addrmode_bitreverse_circular(env
, ctx
);
8011 case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE
:
8012 decode_bo_addrmode_ld_post_pre_base(env
, ctx
);
8014 case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR
:
8015 decode_bo_addrmode_ld_bitreverse_circular(env
, ctx
);
8017 case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE
:
8018 decode_bo_addrmode_stctx_post_pre_base(env
, ctx
);
8020 case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR
:
8021 decode_bo_addrmode_ldmst_bitreverse_circular(env
, ctx
);
8024 case OPC1_32_BOL_LD_A_LONGOFF
:
8025 case OPC1_32_BOL_LD_W_LONGOFF
:
8026 case OPC1_32_BOL_LEA_LONGOFF
:
8027 case OPC1_32_BOL_ST_W_LONGOFF
:
8028 case OPC1_32_BOL_ST_A_LONGOFF
:
8029 case OPC1_32_BOL_LD_B_LONGOFF
:
8030 case OPC1_32_BOL_LD_BU_LONGOFF
:
8031 case OPC1_32_BOL_LD_H_LONGOFF
:
8032 case OPC1_32_BOL_LD_HU_LONGOFF
:
8033 case OPC1_32_BOL_ST_B_LONGOFF
:
8034 case OPC1_32_BOL_ST_H_LONGOFF
:
8035 decode_bol_opc(env
, ctx
, op1
);
8038 case OPCM_32_BRC_EQ_NEQ
:
8039 case OPCM_32_BRC_GE
:
8040 case OPCM_32_BRC_JLT
:
8041 case OPCM_32_BRC_JNE
:
8042 const4
= MASK_OP_BRC_CONST4_SEXT(ctx
->opcode
);
8043 address
= MASK_OP_BRC_DISP15_SEXT(ctx
->opcode
);
8044 r1
= MASK_OP_BRC_S1(ctx
->opcode
);
8045 gen_compute_branch(ctx
, op1
, r1
, 0, const4
, address
);
8048 case OPCM_32_BRN_JTT
:
8049 address
= MASK_OP_BRN_DISP15_SEXT(ctx
->opcode
);
8050 r1
= MASK_OP_BRN_S1(ctx
->opcode
);
8051 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
8054 case OPCM_32_BRR_EQ_NEQ
:
8055 case OPCM_32_BRR_ADDR_EQ_NEQ
:
8056 case OPCM_32_BRR_GE
:
8057 case OPCM_32_BRR_JLT
:
8058 case OPCM_32_BRR_JNE
:
8059 case OPCM_32_BRR_JNZ
:
8060 case OPCM_32_BRR_LOOP
:
8061 address
= MASK_OP_BRR_DISP15_SEXT(ctx
->opcode
);
8062 r2
= MASK_OP_BRR_S2(ctx
->opcode
);
8063 r1
= MASK_OP_BRR_S1(ctx
->opcode
);
8064 gen_compute_branch(ctx
, op1
, r1
, r2
, 0, address
);
8067 case OPCM_32_RC_LOGICAL_SHIFT
:
8068 decode_rc_logical_shift(env
, ctx
);
8070 case OPCM_32_RC_ACCUMULATOR
:
8071 decode_rc_accumulator(env
, ctx
);
8073 case OPCM_32_RC_SERVICEROUTINE
:
8074 decode_rc_serviceroutine(env
, ctx
);
8076 case OPCM_32_RC_MUL
:
8077 decode_rc_mul(env
, ctx
);
8080 case OPCM_32_RCPW_MASK_INSERT
:
8081 decode_rcpw_insert(env
, ctx
);
8084 case OPC1_32_RCRR_INSERT
:
8085 r1
= MASK_OP_RCRR_S1(ctx
->opcode
);
8086 r2
= MASK_OP_RCRR_S3(ctx
->opcode
);
8087 r3
= MASK_OP_RCRR_D(ctx
->opcode
);
8088 const16
= MASK_OP_RCRR_CONST4(ctx
->opcode
);
8089 temp
= tcg_const_i32(const16
);
8090 temp2
= tcg_temp_new(); /* width*/
8091 temp3
= tcg_temp_new(); /* pos */
8093 tcg_gen_andi_tl(temp2
, cpu_gpr_d
[r3
+1], 0x1f);
8094 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r3
], 0x1f);
8096 gen_insert(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, temp2
, temp3
);
8098 tcg_temp_free(temp
);
8099 tcg_temp_free(temp2
);
8100 tcg_temp_free(temp3
);
8103 case OPCM_32_RCRW_MASK_INSERT
:
8104 decode_rcrw_insert(env
, ctx
);
8107 case OPCM_32_RCR_COND_SELECT
:
8108 decode_rcr_cond_select(env
, ctx
);
8110 case OPCM_32_RCR_MADD
:
8111 decode_rcr_madd(env
, ctx
);
8113 case OPCM_32_RCR_MSUB
:
8114 decode_rcr_msub(env
, ctx
);
8117 case OPC1_32_RLC_ADDI
:
8118 case OPC1_32_RLC_ADDIH
:
8119 case OPC1_32_RLC_ADDIH_A
:
8120 case OPC1_32_RLC_MFCR
:
8121 case OPC1_32_RLC_MOV
:
8122 case OPC1_32_RLC_MOV_64
:
8123 case OPC1_32_RLC_MOV_U
:
8124 case OPC1_32_RLC_MOV_H
:
8125 case OPC1_32_RLC_MOVH_A
:
8126 case OPC1_32_RLC_MTCR
:
8127 decode_rlc_opc(env
, ctx
, op1
);
8130 case OPCM_32_RR_ACCUMULATOR
:
8131 decode_rr_accumulator(env
, ctx
);
8133 case OPCM_32_RR_LOGICAL_SHIFT
:
8134 decode_rr_logical_shift(env
, ctx
);
8136 case OPCM_32_RR_ADDRESS
:
8137 decode_rr_address(env
, ctx
);
8139 case OPCM_32_RR_IDIRECT
:
8140 decode_rr_idirect(env
, ctx
);
8142 case OPCM_32_RR_DIVIDE
:
8143 decode_rr_divide(env
, ctx
);
8146 case OPCM_32_RR1_MUL
:
8147 decode_rr1_mul(env
, ctx
);
8149 case OPCM_32_RR1_MULQ
:
8150 decode_rr1_mulq(env
, ctx
);
8153 case OPCM_32_RR2_MUL
:
8154 decode_rr2_mul(env
, ctx
);
8157 case OPCM_32_RRPW_EXTRACT_INSERT
:
8158 decode_rrpw_extract_insert(env
, ctx
);
8160 case OPC1_32_RRPW_DEXTR
:
8161 r1
= MASK_OP_RRPW_S1(ctx
->opcode
);
8162 r2
= MASK_OP_RRPW_S2(ctx
->opcode
);
8163 r3
= MASK_OP_RRPW_D(ctx
->opcode
);
8164 const16
= MASK_OP_RRPW_POS(ctx
->opcode
);
8166 tcg_gen_rotli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], const16
);
8168 temp
= tcg_temp_new();
8169 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], const16
);
8170 tcg_gen_shri_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
], 32 - const16
);
8171 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
8172 tcg_temp_free(temp
);
8176 case OPCM_32_RRR_COND_SELECT
:
8177 decode_rrr_cond_select(env
, ctx
);
8179 case OPCM_32_RRR_DIVIDE
:
8180 decode_rrr_divide(env
, ctx
);
8182 case OPCM_32_RRR2_MADD
:
8183 decode_rrr2_madd(env
, ctx
);
8185 case OPCM_32_RRR2_MSUB
:
8186 decode_rrr2_msub(env
, ctx
);
8189 case OPCM_32_RRR1_MADD
:
8190 decode_rrr1_madd(env
, ctx
);
8192 case OPCM_32_RRR1_MADDQ_H
:
8193 decode_rrr1_maddq_h(env
, ctx
);
8195 case OPCM_32_RRR1_MADDSU_H
:
8196 decode_rrr1_maddsu_h(env
, ctx
);
8198 case OPCM_32_RRR1_MSUB_H
:
8199 decode_rrr1_msub(env
, ctx
);
8201 case OPCM_32_RRR1_MSUB_Q
:
8202 decode_rrr1_msubq_h(env
, ctx
);
8204 case OPCM_32_RRR1_MSUBAD_H
:
8205 decode_rrr1_msubad_h(env
, ctx
);
8208 case OPCM_32_RRRR_EXTRACT_INSERT
:
8209 decode_rrrr_extract_insert(env
, ctx
);
8211 case OPCM_32_RRRW_EXTRACT_INSERT
:
8212 decode_rrrw_extract_insert(env
, ctx
);
8215 case OPCM_32_SYS_INTERRUPTS
:
8216 decode_sys_interrupts(env
, ctx
);
8218 case OPC1_32_SYS_RSTV
:
8219 tcg_gen_movi_tl(cpu_PSW_V
, 0);
8220 tcg_gen_mov_tl(cpu_PSW_SV
, cpu_PSW_V
);
8221 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
8222 tcg_gen_mov_tl(cpu_PSW_SAV
, cpu_PSW_V
);
8227 static void decode_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int *is_branch
)
8229 /* 16-Bit Instruction */
8230 if ((ctx
->opcode
& 0x1) == 0) {
8231 ctx
->next_pc
= ctx
->pc
+ 2;
8232 decode_16Bit_opc(env
, ctx
);
8233 /* 32-Bit Instruction */
8235 ctx
->next_pc
= ctx
->pc
+ 4;
8236 decode_32Bit_opc(env
, ctx
);
8241 gen_intermediate_code_internal(TriCoreCPU
*cpu
, struct TranslationBlock
*tb
,
8244 CPUState
*cs
= CPU(cpu
);
8245 CPUTriCoreState
*env
= &cpu
->env
;
8247 target_ulong pc_start
;
8251 qemu_log("search pc %d\n", search_pc
);
8259 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
8260 ctx
.bstate
= BS_NONE
;
8261 ctx
.mem_idx
= cpu_mmu_index(env
);
8263 tcg_clear_temp_count();
8265 while (ctx
.bstate
== BS_NONE
) {
8266 ctx
.opcode
= cpu_ldl_code(env
, ctx
.pc
);
8267 decode_opc(env
, &ctx
, 0);
8271 if (tcg_op_buf_full()) {
8272 gen_save_pc(ctx
.next_pc
);
8277 gen_save_pc(ctx
.next_pc
);
8281 ctx
.pc
= ctx
.next_pc
;
8284 gen_tb_end(tb
, num_insns
);
8286 printf("done_generating search pc\n");
8288 tb
->size
= ctx
.pc
- pc_start
;
8289 tb
->icount
= num_insns
;
8291 if (tcg_check_temp_count()) {
8292 printf("LEAK at %08x\n", env
->PC
);
8296 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
8297 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
8298 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
8305 gen_intermediate_code(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
8307 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, false);
8311 gen_intermediate_code_pc(CPUTriCoreState
*env
, struct TranslationBlock
*tb
)
8313 gen_intermediate_code_internal(tricore_env_get_cpu(env
), tb
, true);
8317 restore_state_to_opc(CPUTriCoreState
*env
, TranslationBlock
*tb
, int pc_pos
)
8319 env
->PC
= tcg_ctx
.gen_opc_pc
[pc_pos
];
8327 void cpu_state_reset(CPUTriCoreState
*env
)
8329 /* Reset Regs to Default Value */
8333 static void tricore_tcg_init_csfr(void)
8335 cpu_PCXI
= tcg_global_mem_new(TCG_AREG0
,
8336 offsetof(CPUTriCoreState
, PCXI
), "PCXI");
8337 cpu_PSW
= tcg_global_mem_new(TCG_AREG0
,
8338 offsetof(CPUTriCoreState
, PSW
), "PSW");
8339 cpu_PC
= tcg_global_mem_new(TCG_AREG0
,
8340 offsetof(CPUTriCoreState
, PC
), "PC");
8341 cpu_ICR
= tcg_global_mem_new(TCG_AREG0
,
8342 offsetof(CPUTriCoreState
, ICR
), "ICR");
8345 void tricore_tcg_init(void)
8352 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
8354 for (i
= 0 ; i
< 16 ; i
++) {
8355 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
8356 offsetof(CPUTriCoreState
, gpr_a
[i
]),
8359 for (i
= 0 ; i
< 16 ; i
++) {
8360 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
8361 offsetof(CPUTriCoreState
, gpr_d
[i
]),
8364 tricore_tcg_init_csfr();
8365 /* init PSW flag cache */
8366 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
8367 offsetof(CPUTriCoreState
, PSW_USB_C
),
8369 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
8370 offsetof(CPUTriCoreState
, PSW_USB_V
),
8372 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
8373 offsetof(CPUTriCoreState
, PSW_USB_SV
),
8375 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
8376 offsetof(CPUTriCoreState
, PSW_USB_AV
),
8378 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
8379 offsetof(CPUTriCoreState
, PSW_USB_SAV
),