2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
39 static TCGv cpu_gpr_a
[16];
40 static TCGv cpu_gpr_d
[16];
42 static TCGv cpu_PSW_C
;
43 static TCGv cpu_PSW_V
;
44 static TCGv cpu_PSW_SV
;
45 static TCGv cpu_PSW_AV
;
46 static TCGv cpu_PSW_SAV
;
48 static TCGv_ptr cpu_env
;
50 #include "exec/gen-icount.h"
/* Printable names of the address registers ("sp" is A10, the stack pointer). */
static const char *regnames_a[] = {
      "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
      "a6"  , "a7"  , "a8"  , "a9" , "sp" , "a11" ,
      "a12" , "a13" , "a14" , "a15",
    };
/* Printable names of the data registers D[0..15]. */
static const char *regnames_d[] = {
      "d0"  , "d1"  , "d2"  , "d3" , "d4"  , "d5"  ,
      "d6"  , "d7"  , "d8"  , "d9" , "d10" , "d11" ,
      "d12" , "d13" , "d14" , "d15",
    };
64 typedef struct DisasContext
{
65 struct TranslationBlock
*tb
;
66 target_ulong pc
, saved_pc
, next_pc
;
68 int singlestep_enabled
;
69 /* Routine used to access memory */
71 uint32_t hflags
, saved_hflags
;
90 void tricore_cpu_dump_state(CPUState
*cs
, FILE *f
,
91 fprintf_function cpu_fprintf
, int flags
)
93 TriCoreCPU
*cpu
= TRICORE_CPU(cs
);
94 CPUTriCoreState
*env
= &cpu
->env
;
100 cpu_fprintf(f
, "PC: " TARGET_FMT_lx
, env
->PC
);
101 cpu_fprintf(f
, " PSW: " TARGET_FMT_lx
, psw
);
102 cpu_fprintf(f
, " ICR: " TARGET_FMT_lx
, env
->ICR
);
103 cpu_fprintf(f
, "\nPCXI: " TARGET_FMT_lx
, env
->PCXI
);
104 cpu_fprintf(f
, " FCX: " TARGET_FMT_lx
, env
->FCX
);
105 cpu_fprintf(f
, " LCX: " TARGET_FMT_lx
, env
->LCX
);
107 for (i
= 0; i
< 16; ++i
) {
109 cpu_fprintf(f
, "\nGPR A%02d:", i
);
111 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_a
[i
]);
113 for (i
= 0; i
< 16; ++i
) {
115 cpu_fprintf(f
, "\nGPR D%02d:", i
);
117 cpu_fprintf(f
, " " TARGET_FMT_lx
, env
->gpr_d
[i
]);
119 cpu_fprintf(f
, "\n");
/*
 * Functions to generate micro-ops
 */

/* Macros for generating helpers */

/* Call helper @name with cpu_env and the 32-bit constant @arg. */
#define gen_helper_1arg(name, arg) do {                           \
    TCGv_i32 helper_tmp = tcg_const_i32(arg);                     \
    gen_helper_##name(cpu_env, helper_tmp);                       \
    tcg_temp_free_i32(helper_tmp);                                \
    } while (0)
/* Call helper @name on the two 16-bit halves of @arg0 and the (sign
   extended) low half of @arg1, passed twice.  */
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* Call helper @name on the sign-extended 16-bit halves of @arg0 and
   @arg1 (high half of @arg1 last).  */
#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg10, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* Call helper @name on the sign-extended 16-bit halves of @arg0 and
   @arg1 (low half of @arg1 last).  */
#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg10 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg00, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg01, arg0);                          \
    tcg_gen_sari_tl(arg10, arg1, 16);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                          \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg10);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* Call helper @name on the 16-bit halves of @arg0 and the high half of
   @arg1, passed twice.  */
#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {         \
    TCGv arg00 = tcg_temp_new();                             \
    TCGv arg01 = tcg_temp_new();                             \
    TCGv arg11 = tcg_temp_new();                             \
    tcg_gen_sari_tl(arg01, arg0, 16);                        \
    tcg_gen_ext16s_tl(arg00, arg0);                          \
    tcg_gen_sari_tl(arg11, arg1, 16);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n);   \
    tcg_temp_free(arg00);                                    \
    tcg_temp_free(arg01);                                    \
    tcg_temp_free(arg11);                                    \
} while (0)
/* Call a 64-bit helper @name on the register pair (@al1,@ah1) and @arg2,
   splitting the 64-bit result into (@rl,@rh).  */
#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {    \
    TCGv_i64 ret = tcg_temp_new_i64();                       \
    TCGv_i64 arg1 = tcg_temp_new_i64();                      \
                                                             \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                  \
    gen_helper_##name(ret, arg1, arg2);                      \
    tcg_gen_extr_i64_i32(rl, rh, ret);                       \
                                                             \
    tcg_temp_free_i64(ret);                                  \
    tcg_temp_free_i64(arg1);                                 \
} while (0)
/* Decode the ABS instruction format's 18-bit constant into an effective
   address. */
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
/* NOTE(review): this macro ignores its parameter and expands `offset`
   from the call site -- confirm all callers name their displacement
   `offset` before changing it. */
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))
208 /* Functions for load/save to/from memory */
210 static inline void gen_offset_ld(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
211 int16_t con
, TCGMemOp mop
)
213 TCGv temp
= tcg_temp_new();
214 tcg_gen_addi_tl(temp
, r2
, con
);
215 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
219 static inline void gen_offset_st(DisasContext
*ctx
, TCGv r1
, TCGv r2
,
220 int16_t con
, TCGMemOp mop
)
222 TCGv temp
= tcg_temp_new();
223 tcg_gen_addi_tl(temp
, r2
, con
);
224 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
228 static void gen_st_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
230 TCGv_i64 temp
= tcg_temp_new_i64();
232 tcg_gen_concat_i32_i64(temp
, rl
, rh
);
233 tcg_gen_qemu_st_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
235 tcg_temp_free_i64(temp
);
238 static void gen_offset_st_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
241 TCGv temp
= tcg_temp_new();
242 tcg_gen_addi_tl(temp
, base
, con
);
243 gen_st_2regs_64(rh
, rl
, temp
, ctx
);
247 static void gen_ld_2regs_64(TCGv rh
, TCGv rl
, TCGv address
, DisasContext
*ctx
)
249 TCGv_i64 temp
= tcg_temp_new_i64();
251 tcg_gen_qemu_ld_i64(temp
, address
, ctx
->mem_idx
, MO_LEQ
);
252 /* write back to two 32 bit regs */
253 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
255 tcg_temp_free_i64(temp
);
258 static void gen_offset_ld_2regs(TCGv rh
, TCGv rl
, TCGv base
, int16_t con
,
261 TCGv temp
= tcg_temp_new();
262 tcg_gen_addi_tl(temp
, base
, con
);
263 gen_ld_2regs_64(rh
, rl
, temp
, ctx
);
267 static void gen_st_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
270 TCGv temp
= tcg_temp_new();
271 tcg_gen_addi_tl(temp
, r2
, off
);
272 tcg_gen_qemu_st_tl(r1
, temp
, ctx
->mem_idx
, mop
);
273 tcg_gen_mov_tl(r2
, temp
);
277 static void gen_ld_preincr(DisasContext
*ctx
, TCGv r1
, TCGv r2
, int16_t off
,
280 TCGv temp
= tcg_temp_new();
281 tcg_gen_addi_tl(temp
, r2
, off
);
282 tcg_gen_qemu_ld_tl(r1
, temp
, ctx
->mem_idx
, mop
);
283 tcg_gen_mov_tl(r2
, temp
);
287 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
288 static void gen_ldmst(DisasContext
*ctx
, int ereg
, TCGv ea
)
290 TCGv temp
= tcg_temp_new();
291 TCGv temp2
= tcg_temp_new();
293 /* temp = (M(EA, word) */
294 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
295 /* temp = temp & ~E[a][63:32]) */
296 tcg_gen_andc_tl(temp
, temp
, cpu_gpr_d
[ereg
+1]);
297 /* temp2 = (E[a][31:0] & E[a][63:32]); */
298 tcg_gen_and_tl(temp2
, cpu_gpr_d
[ereg
], cpu_gpr_d
[ereg
+1]);
299 /* temp = temp | temp2; */
300 tcg_gen_or_tl(temp
, temp
, temp2
);
301 /* M(EA, word) = temp; */
302 tcg_gen_qemu_st_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
305 tcg_temp_free(temp2
);
308 /* tmp = M(EA, word);
311 static void gen_swap(DisasContext
*ctx
, int reg
, TCGv ea
)
313 TCGv temp
= tcg_temp_new();
315 tcg_gen_qemu_ld_tl(temp
, ea
, ctx
->mem_idx
, MO_LEUL
);
316 tcg_gen_qemu_st_tl(cpu_gpr_d
[reg
], ea
, ctx
->mem_idx
, MO_LEUL
);
317 tcg_gen_mov_tl(cpu_gpr_d
[reg
], temp
);
322 /* We generate loads and store to core special function register (csfr) through
323 the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3
324 makros R, A and E, which allow read-only, all and endinit protected access.
325 These makros also specify in which ISA version the csfr was introduced. */
326 #define R(ADDRESS, REG, FEATURE) \
328 if (tricore_feature(env, FEATURE)) { \
329 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
332 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
333 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
334 static inline void gen_mfcr(CPUTriCoreState
*env
, TCGv ret
, int32_t offset
)
336 /* since we're caching PSW make this a special case */
337 if (offset
== 0xfe04) {
338 gen_helper_psw_read(ret
, cpu_env
);
349 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
350 since no execption occurs */
351 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
353 if (tricore_feature(env, FEATURE)) { \
354 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
357 /* Endinit protected registers
358 TODO: Since the endinit bit is in a register of a not yet implemented
359 watchdog device, we handle endinit protected registers like
360 all-access registers for now. */
361 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
362 static inline void gen_mtcr(CPUTriCoreState
*env
, DisasContext
*ctx
, TCGv r1
,
365 if ((ctx
->hflags
& TRICORE_HFLAG_KUU
) == TRICORE_HFLAG_SM
) {
366 /* since we're caching PSW make this a special case */
367 if (offset
== 0xfe04) {
368 gen_helper_psw_write(cpu_env
, r1
);
375 /* generate privilege trap */
379 /* Functions for arithmetic instructions */
381 static inline void gen_add_d(TCGv ret
, TCGv r1
, TCGv r2
)
383 TCGv t0
= tcg_temp_new_i32();
384 TCGv result
= tcg_temp_new_i32();
385 /* Addition and set V/SV bits */
386 tcg_gen_add_tl(result
, r1
, r2
);
388 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
389 tcg_gen_xor_tl(t0
, r1
, r2
);
390 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
392 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
393 /* Calc AV/SAV bits */
394 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
395 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
397 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
398 /* write back result */
399 tcg_gen_mov_tl(ret
, result
);
401 tcg_temp_free(result
);
406 gen_add64_d(TCGv_i64 ret
, TCGv_i64 r1
, TCGv_i64 r2
)
408 TCGv temp
= tcg_temp_new();
409 TCGv_i64 t0
= tcg_temp_new_i64();
410 TCGv_i64 t1
= tcg_temp_new_i64();
411 TCGv_i64 result
= tcg_temp_new_i64();
413 tcg_gen_add_i64(result
, r1
, r2
);
415 tcg_gen_xor_i64(t1
, result
, r1
);
416 tcg_gen_xor_i64(t0
, r1
, r2
);
417 tcg_gen_andc_i64(t1
, t1
, t0
);
418 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t1
, 32);
420 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
421 /* calc AV/SAV bits */
422 tcg_gen_trunc_shr_i64_i32(temp
, result
, 32);
423 tcg_gen_add_tl(cpu_PSW_AV
, temp
, temp
);
424 tcg_gen_xor_tl(cpu_PSW_AV
, temp
, cpu_PSW_AV
);
426 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
427 /* write back result */
428 tcg_gen_mov_i64(ret
, result
);
431 tcg_temp_free_i64(result
);
432 tcg_temp_free_i64(t0
);
433 tcg_temp_free_i64(t1
);
437 gen_addsub64_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
438 TCGv r3
, void(*op1
)(TCGv
, TCGv
, TCGv
),
439 void(*op2
)(TCGv
, TCGv
, TCGv
))
441 TCGv temp
= tcg_temp_new();
442 TCGv temp2
= tcg_temp_new();
443 TCGv temp3
= tcg_temp_new();
444 TCGv temp4
= tcg_temp_new();
446 (*op1
)(temp
, r1_low
, r2
);
448 tcg_gen_xor_tl(temp2
, temp
, r1_low
);
449 tcg_gen_xor_tl(temp3
, r1_low
, r2
);
450 if (op1
== tcg_gen_add_tl
) {
451 tcg_gen_andc_tl(temp2
, temp2
, temp3
);
453 tcg_gen_and_tl(temp2
, temp2
, temp3
);
456 (*op2
)(temp3
, r1_high
, r3
);
458 tcg_gen_xor_tl(cpu_PSW_V
, temp3
, r1_high
);
459 tcg_gen_xor_tl(temp4
, r1_high
, r3
);
460 if (op2
== tcg_gen_add_tl
) {
461 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
463 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp4
);
465 /* combine V0/V1 bits */
466 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp2
);
468 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
470 tcg_gen_mov_tl(ret_low
, temp
);
471 tcg_gen_mov_tl(ret_high
, temp3
);
473 tcg_gen_add_tl(temp
, ret_low
, ret_low
);
474 tcg_gen_xor_tl(temp
, temp
, ret_low
);
475 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
476 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, ret_high
);
477 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
479 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
482 tcg_temp_free(temp2
);
483 tcg_temp_free(temp3
);
484 tcg_temp_free(temp4
);
487 /* ret = r2 + (r1 * r3); */
488 static inline void gen_madd32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
490 TCGv_i64 t1
= tcg_temp_new_i64();
491 TCGv_i64 t2
= tcg_temp_new_i64();
492 TCGv_i64 t3
= tcg_temp_new_i64();
494 tcg_gen_ext_i32_i64(t1
, r1
);
495 tcg_gen_ext_i32_i64(t2
, r2
);
496 tcg_gen_ext_i32_i64(t3
, r3
);
498 tcg_gen_mul_i64(t1
, t1
, t3
);
499 tcg_gen_add_i64(t1
, t2
, t1
);
501 tcg_gen_trunc_i64_i32(ret
, t1
);
504 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
505 /* t1 < -0x80000000 */
506 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
507 tcg_gen_or_i64(t2
, t2
, t3
);
508 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
509 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
511 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
512 /* Calc AV/SAV bits */
513 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
514 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
516 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
518 tcg_temp_free_i64(t1
);
519 tcg_temp_free_i64(t2
);
520 tcg_temp_free_i64(t3
);
523 static inline void gen_maddi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
525 TCGv temp
= tcg_const_i32(con
);
526 gen_madd32_d(ret
, r1
, r2
, temp
);
531 gen_madd64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
534 TCGv t1
= tcg_temp_new();
535 TCGv t2
= tcg_temp_new();
536 TCGv t3
= tcg_temp_new();
537 TCGv t4
= tcg_temp_new();
539 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
540 /* only the add can overflow */
541 tcg_gen_add2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
543 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
544 tcg_gen_xor_tl(t1
, r2_high
, t2
);
545 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
547 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
548 /* Calc AV/SAV bits */
549 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
550 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
552 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
553 /* write back the result */
554 tcg_gen_mov_tl(ret_low
, t3
);
555 tcg_gen_mov_tl(ret_high
, t4
);
564 gen_maddu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
567 TCGv_i64 t1
= tcg_temp_new_i64();
568 TCGv_i64 t2
= tcg_temp_new_i64();
569 TCGv_i64 t3
= tcg_temp_new_i64();
571 tcg_gen_extu_i32_i64(t1
, r1
);
572 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
573 tcg_gen_extu_i32_i64(t3
, r3
);
575 tcg_gen_mul_i64(t1
, t1
, t3
);
576 tcg_gen_add_i64(t2
, t2
, t1
);
577 /* write back result */
578 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t2
);
579 /* only the add overflows, if t2 < t1
581 tcg_gen_setcond_i64(TCG_COND_LTU
, t2
, t2
, t1
);
582 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
583 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
585 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
586 /* Calc AV/SAV bits */
587 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
588 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
590 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
592 tcg_temp_free_i64(t1
);
593 tcg_temp_free_i64(t2
);
594 tcg_temp_free_i64(t3
);
598 gen_maddi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
601 TCGv temp
= tcg_const_i32(con
);
602 gen_madd64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
607 gen_maddui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
610 TCGv temp
= tcg_const_i32(con
);
611 gen_maddu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
616 gen_madd_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
617 TCGv r3
, uint32_t n
, uint32_t mode
)
619 TCGv temp
= tcg_const_i32(n
);
620 TCGv temp2
= tcg_temp_new();
621 TCGv_i64 temp64
= tcg_temp_new_i64();
624 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
627 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
630 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
633 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
636 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
637 gen_addsub64_h(ret_low
, ret_high
, r1_low
, r1_high
, temp
, temp2
,
638 tcg_gen_add_tl
, tcg_gen_add_tl
);
640 tcg_temp_free(temp2
);
641 tcg_temp_free_i64(temp64
);
644 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
);
647 gen_madds_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
648 TCGv r3
, uint32_t n
, uint32_t mode
)
650 TCGv temp
= tcg_const_i32(n
);
651 TCGv temp2
= tcg_temp_new();
652 TCGv temp3
= tcg_temp_new();
653 TCGv_i64 temp64
= tcg_temp_new_i64();
657 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
660 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
663 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
666 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
669 tcg_gen_extr_i64_i32(temp
, temp2
, temp64
);
670 gen_adds(ret_low
, r1_low
, temp
);
671 tcg_gen_mov_tl(temp
, cpu_PSW_V
);
672 tcg_gen_mov_tl(temp3
, cpu_PSW_AV
);
673 gen_adds(ret_high
, r1_high
, temp2
);
675 tcg_gen_or_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
676 /* combine av bits */
677 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp3
);
680 tcg_temp_free(temp2
);
681 tcg_temp_free(temp3
);
682 tcg_temp_free_i64(temp64
);
687 gen_maddm_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
688 TCGv r3
, uint32_t n
, uint32_t mode
)
690 TCGv temp
= tcg_const_i32(n
);
691 TCGv_i64 temp64
= tcg_temp_new_i64();
692 TCGv_i64 temp64_2
= tcg_temp_new_i64();
693 TCGv_i64 temp64_3
= tcg_temp_new_i64();
696 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
699 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
702 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
705 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
708 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
709 gen_add64_d(temp64_3
, temp64_2
, temp64
);
710 /* write back result */
711 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64_3
);
714 tcg_temp_free_i64(temp64
);
715 tcg_temp_free_i64(temp64_2
);
716 tcg_temp_free_i64(temp64_3
);
720 gen_maddms_h(TCGv ret_low
, TCGv ret_high
, TCGv r1_low
, TCGv r1_high
, TCGv r2
,
721 TCGv r3
, uint32_t n
, uint32_t mode
)
723 TCGv temp
= tcg_const_i32(n
);
724 TCGv_i64 temp64
= tcg_temp_new_i64();
725 TCGv_i64 temp64_2
= tcg_temp_new_i64();
728 GEN_HELPER_LL(mulm_h
, temp64
, r2
, r3
, temp
);
731 GEN_HELPER_LU(mulm_h
, temp64
, r2
, r3
, temp
);
734 GEN_HELPER_UL(mulm_h
, temp64
, r2
, r3
, temp
);
737 GEN_HELPER_UU(mulm_h
, temp64
, r2
, r3
, temp
);
740 tcg_gen_concat_i32_i64(temp64_2
, r1_low
, r1_high
);
741 gen_helper_add64_ssov(temp64
, cpu_env
, temp64_2
, temp64
);
742 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
745 tcg_temp_free_i64(temp64
);
746 tcg_temp_free_i64(temp64_2
);
750 gen_maddr64_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
, uint32_t n
,
753 TCGv temp
= tcg_const_i32(n
);
754 TCGv_i64 temp64
= tcg_temp_new_i64();
757 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
760 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
763 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
766 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
769 gen_helper_addr_h(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
772 tcg_temp_free_i64(temp64
);
776 gen_maddr32_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
778 TCGv temp
= tcg_temp_new();
779 TCGv temp2
= tcg_temp_new();
781 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
782 tcg_gen_shli_tl(temp
, r1
, 16);
783 gen_maddr64_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
786 tcg_temp_free(temp2
);
790 gen_maddr64s_h(TCGv ret
, TCGv r1_low
, TCGv r1_high
, TCGv r2
, TCGv r3
,
791 uint32_t n
, uint32_t mode
)
793 TCGv temp
= tcg_const_i32(n
);
794 TCGv_i64 temp64
= tcg_temp_new_i64();
797 GEN_HELPER_LL(mul_h
, temp64
, r2
, r3
, temp
);
800 GEN_HELPER_LU(mul_h
, temp64
, r2
, r3
, temp
);
803 GEN_HELPER_UL(mul_h
, temp64
, r2
, r3
, temp
);
806 GEN_HELPER_UU(mul_h
, temp64
, r2
, r3
, temp
);
809 gen_helper_addr_h_ssov(ret
, cpu_env
, temp64
, r1_low
, r1_high
);
812 tcg_temp_free_i64(temp64
);
816 gen_maddr32s_h(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
, uint32_t mode
)
818 TCGv temp
= tcg_temp_new();
819 TCGv temp2
= tcg_temp_new();
821 tcg_gen_andi_tl(temp2
, r1
, 0xffff0000);
822 tcg_gen_shli_tl(temp
, r1
, 16);
823 gen_maddr64s_h(ret
, temp
, temp2
, r2
, r3
, n
, mode
);
826 tcg_temp_free(temp2
);
830 gen_maddr_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
832 TCGv temp
= tcg_const_i32(n
);
833 gen_helper_maddr_q(ret
, cpu_env
, r1
, r2
, r3
, temp
);
838 gen_maddrs_q(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
, uint32_t n
)
840 TCGv temp
= tcg_const_i32(n
);
841 gen_helper_maddr_q_ssov(ret
, cpu_env
, r1
, r2
, r3
, temp
);
846 gen_madd32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
847 uint32_t up_shift
, CPUTriCoreState
*env
)
849 TCGv temp
= tcg_temp_new();
850 TCGv temp2
= tcg_temp_new();
851 TCGv temp3
= tcg_temp_new();
852 TCGv_i64 t1
= tcg_temp_new_i64();
853 TCGv_i64 t2
= tcg_temp_new_i64();
854 TCGv_i64 t3
= tcg_temp_new_i64();
856 tcg_gen_ext_i32_i64(t2
, arg2
);
857 tcg_gen_ext_i32_i64(t3
, arg3
);
859 tcg_gen_mul_i64(t2
, t2
, t3
);
860 tcg_gen_shli_i64(t2
, t2
, n
);
862 tcg_gen_ext_i32_i64(t1
, arg1
);
863 tcg_gen_sari_i64(t2
, t2
, up_shift
);
865 tcg_gen_add_i64(t3
, t1
, t2
);
866 tcg_gen_trunc_i64_i32(temp3
, t3
);
868 tcg_gen_setcondi_i64(TCG_COND_GT
, t1
, t3
, 0x7fffffffLL
);
869 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t3
, -0x80000000LL
);
870 tcg_gen_or_i64(t1
, t1
, t2
);
871 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
872 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
873 /* We produce an overflow on the host if the mul before was
874 (0x80000000 * 0x80000000) << 1). If this is the
875 case, we negate the ovf. */
877 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
878 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
879 tcg_gen_and_tl(temp
, temp
, temp2
);
880 tcg_gen_shli_tl(temp
, temp
, 31);
881 /* negate v bit, if special condition */
882 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
885 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
886 /* Calc AV/SAV bits */
887 tcg_gen_add_tl(cpu_PSW_AV
, temp3
, temp3
);
888 tcg_gen_xor_tl(cpu_PSW_AV
, temp3
, cpu_PSW_AV
);
890 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
891 /* write back result */
892 tcg_gen_mov_tl(ret
, temp3
);
895 tcg_temp_free(temp2
);
896 tcg_temp_free(temp3
);
897 tcg_temp_free_i64(t1
);
898 tcg_temp_free_i64(t2
);
899 tcg_temp_free_i64(t3
);
903 gen_m16add32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
905 TCGv temp
= tcg_temp_new();
906 TCGv temp2
= tcg_temp_new();
908 tcg_gen_mul_tl(temp
, arg2
, arg3
);
909 } else { /* n is exspected to be 1 */
910 tcg_gen_mul_tl(temp
, arg2
, arg3
);
911 tcg_gen_shli_tl(temp
, temp
, 1);
912 /* catch special case r1 = r2 = 0x8000 */
913 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
914 tcg_gen_sub_tl(temp
, temp
, temp2
);
916 gen_add_d(ret
, arg1
, temp
);
919 tcg_temp_free(temp2
);
923 gen_m16adds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
)
925 TCGv temp
= tcg_temp_new();
926 TCGv temp2
= tcg_temp_new();
928 tcg_gen_mul_tl(temp
, arg2
, arg3
);
929 } else { /* n is exspected to be 1 */
930 tcg_gen_mul_tl(temp
, arg2
, arg3
);
931 tcg_gen_shli_tl(temp
, temp
, 1);
932 /* catch special case r1 = r2 = 0x8000 */
933 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
934 tcg_gen_sub_tl(temp
, temp
, temp2
);
936 gen_adds(ret
, arg1
, temp
);
939 tcg_temp_free(temp2
);
943 gen_m16add64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
944 TCGv arg3
, uint32_t n
)
946 TCGv temp
= tcg_temp_new();
947 TCGv temp2
= tcg_temp_new();
948 TCGv_i64 t1
= tcg_temp_new_i64();
949 TCGv_i64 t2
= tcg_temp_new_i64();
950 TCGv_i64 t3
= tcg_temp_new_i64();
953 tcg_gen_mul_tl(temp
, arg2
, arg3
);
954 } else { /* n is exspected to be 1 */
955 tcg_gen_mul_tl(temp
, arg2
, arg3
);
956 tcg_gen_shli_tl(temp
, temp
, 1);
957 /* catch special case r1 = r2 = 0x8000 */
958 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
959 tcg_gen_sub_tl(temp
, temp
, temp2
);
961 tcg_gen_ext_i32_i64(t2
, temp
);
962 tcg_gen_shli_i64(t2
, t2
, 16);
963 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
964 gen_add64_d(t3
, t1
, t2
);
965 /* write back result */
966 tcg_gen_extr_i64_i32(rl
, rh
, t3
);
968 tcg_temp_free_i64(t1
);
969 tcg_temp_free_i64(t2
);
970 tcg_temp_free_i64(t3
);
972 tcg_temp_free(temp2
);
976 gen_m16adds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
977 TCGv arg3
, uint32_t n
)
979 TCGv temp
= tcg_temp_new();
980 TCGv temp2
= tcg_temp_new();
981 TCGv_i64 t1
= tcg_temp_new_i64();
982 TCGv_i64 t2
= tcg_temp_new_i64();
985 tcg_gen_mul_tl(temp
, arg2
, arg3
);
986 } else { /* n is exspected to be 1 */
987 tcg_gen_mul_tl(temp
, arg2
, arg3
);
988 tcg_gen_shli_tl(temp
, temp
, 1);
989 /* catch special case r1 = r2 = 0x8000 */
990 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, temp
, 0x80000000);
991 tcg_gen_sub_tl(temp
, temp
, temp2
);
993 tcg_gen_ext_i32_i64(t2
, temp
);
994 tcg_gen_shli_i64(t2
, t2
, 16);
995 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
997 gen_helper_add64_ssov(t1
, cpu_env
, t1
, t2
);
998 tcg_gen_extr_i64_i32(rl
, rh
, t1
);
1000 tcg_temp_free(temp
);
1001 tcg_temp_free(temp2
);
1002 tcg_temp_free_i64(t1
);
1003 tcg_temp_free_i64(t2
);
1007 gen_madd64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1008 TCGv arg3
, uint32_t n
, CPUTriCoreState
*env
)
1010 TCGv_i64 t1
= tcg_temp_new_i64();
1011 TCGv_i64 t2
= tcg_temp_new_i64();
1012 TCGv_i64 t3
= tcg_temp_new_i64();
1013 TCGv_i64 t4
= tcg_temp_new_i64();
1016 tcg_gen_concat_i32_i64(t1
, arg1_low
, arg1_high
);
1017 tcg_gen_ext_i32_i64(t2
, arg2
);
1018 tcg_gen_ext_i32_i64(t3
, arg3
);
1020 tcg_gen_mul_i64(t2
, t2
, t3
);
1022 tcg_gen_shli_i64(t2
, t2
, 1);
1024 tcg_gen_add_i64(t4
, t1
, t2
);
1026 tcg_gen_xor_i64(t3
, t4
, t1
);
1027 tcg_gen_xor_i64(t2
, t1
, t2
);
1028 tcg_gen_andc_i64(t3
, t3
, t2
);
1029 tcg_gen_trunc_shr_i64_i32(cpu_PSW_V
, t3
, 32);
1030 /* We produce an overflow on the host if the mul before was
1031 (0x80000000 * 0x80000000) << 1). If this is the
1032 case, we negate the ovf. */
1034 temp
= tcg_temp_new();
1035 temp2
= tcg_temp_new();
1036 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, arg2
, 0x80000000);
1037 tcg_gen_setcond_tl(TCG_COND_EQ
, temp2
, arg2
, arg3
);
1038 tcg_gen_and_tl(temp
, temp
, temp2
);
1039 tcg_gen_shli_tl(temp
, temp
, 31);
1040 /* negate v bit, if special condition */
1041 tcg_gen_xor_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1043 tcg_temp_free(temp
);
1044 tcg_temp_free(temp2
);
1046 /* write back result */
1047 tcg_gen_extr_i64_i32(rl
, rh
, t4
);
1049 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1050 /* Calc AV/SAV bits */
1051 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1052 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1054 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1056 tcg_temp_free_i64(t1
);
1057 tcg_temp_free_i64(t2
);
1058 tcg_temp_free_i64(t3
);
1059 tcg_temp_free_i64(t4
);
1063 gen_madds32_q(TCGv ret
, TCGv arg1
, TCGv arg2
, TCGv arg3
, uint32_t n
,
1066 TCGv_i64 t1
= tcg_temp_new_i64();
1067 TCGv_i64 t2
= tcg_temp_new_i64();
1068 TCGv_i64 t3
= tcg_temp_new_i64();
1070 tcg_gen_ext_i32_i64(t1
, arg1
);
1071 tcg_gen_ext_i32_i64(t2
, arg2
);
1072 tcg_gen_ext_i32_i64(t3
, arg3
);
1074 tcg_gen_mul_i64(t2
, t2
, t3
);
1075 tcg_gen_sari_i64(t2
, t2
, up_shift
- n
);
1077 gen_helper_madd32_q_add_ssov(ret
, cpu_env
, t1
, t2
);
1079 tcg_temp_free_i64(t1
);
1080 tcg_temp_free_i64(t2
);
1081 tcg_temp_free_i64(t3
);
1085 gen_madds64_q(TCGv rl
, TCGv rh
, TCGv arg1_low
, TCGv arg1_high
, TCGv arg2
,
1086 TCGv arg3
, uint32_t n
)
1088 TCGv_i64 r1
= tcg_temp_new_i64();
1089 TCGv temp
= tcg_const_i32(n
);
1091 tcg_gen_concat_i32_i64(r1
, arg1_low
, arg1_high
);
1092 gen_helper_madd64_q_ssov(r1
, cpu_env
, r1
, arg2
, arg3
, temp
);
1093 tcg_gen_extr_i64_i32(rl
, rh
, r1
);
1095 tcg_temp_free_i64(r1
);
1096 tcg_temp_free(temp
);
1098 /* ret = r2 - (r1 * r3); */
1099 static inline void gen_msub32_d(TCGv ret
, TCGv r1
, TCGv r2
, TCGv r3
)
1101 TCGv_i64 t1
= tcg_temp_new_i64();
1102 TCGv_i64 t2
= tcg_temp_new_i64();
1103 TCGv_i64 t3
= tcg_temp_new_i64();
1105 tcg_gen_ext_i32_i64(t1
, r1
);
1106 tcg_gen_ext_i32_i64(t2
, r2
);
1107 tcg_gen_ext_i32_i64(t3
, r3
);
1109 tcg_gen_mul_i64(t1
, t1
, t3
);
1110 tcg_gen_sub_i64(t1
, t2
, t1
);
1112 tcg_gen_trunc_i64_i32(ret
, t1
);
1115 tcg_gen_setcondi_i64(TCG_COND_GT
, t3
, t1
, 0x7fffffffLL
);
1116 /* result < -0x80000000 */
1117 tcg_gen_setcondi_i64(TCG_COND_LT
, t2
, t1
, -0x80000000LL
);
1118 tcg_gen_or_i64(t2
, t2
, t3
);
1119 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t2
);
1120 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1123 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1124 /* Calc AV/SAV bits */
1125 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1126 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1128 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1130 tcg_temp_free_i64(t1
);
1131 tcg_temp_free_i64(t2
);
1132 tcg_temp_free_i64(t3
);
1135 static inline void gen_msubi32_d(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1137 TCGv temp
= tcg_const_i32(con
);
1138 gen_msub32_d(ret
, r1
, r2
, temp
);
1139 tcg_temp_free(temp
);
1143 gen_msub64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1146 TCGv t1
= tcg_temp_new();
1147 TCGv t2
= tcg_temp_new();
1148 TCGv t3
= tcg_temp_new();
1149 TCGv t4
= tcg_temp_new();
1151 tcg_gen_muls2_tl(t1
, t2
, r1
, r3
);
1152 /* only the sub can overflow */
1153 tcg_gen_sub2_tl(t3
, t4
, r2_low
, r2_high
, t1
, t2
);
1155 tcg_gen_xor_tl(cpu_PSW_V
, t4
, r2_high
);
1156 tcg_gen_xor_tl(t1
, r2_high
, t2
);
1157 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, t1
);
1159 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1160 /* Calc AV/SAV bits */
1161 tcg_gen_add_tl(cpu_PSW_AV
, t4
, t4
);
1162 tcg_gen_xor_tl(cpu_PSW_AV
, t4
, cpu_PSW_AV
);
1164 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1165 /* write back the result */
1166 tcg_gen_mov_tl(ret_low
, t3
);
1167 tcg_gen_mov_tl(ret_high
, t4
);
1176 gen_msubi64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1179 TCGv temp
= tcg_const_i32(con
);
1180 gen_msub64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1181 tcg_temp_free(temp
);
1185 gen_msubu64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1188 TCGv_i64 t1
= tcg_temp_new_i64();
1189 TCGv_i64 t2
= tcg_temp_new_i64();
1190 TCGv_i64 t3
= tcg_temp_new_i64();
1192 tcg_gen_extu_i32_i64(t1
, r1
);
1193 tcg_gen_concat_i32_i64(t2
, r2_low
, r2_high
);
1194 tcg_gen_extu_i32_i64(t3
, r3
);
1196 tcg_gen_mul_i64(t1
, t1
, t3
);
1197 tcg_gen_sub_i64(t3
, t2
, t1
);
1198 tcg_gen_extr_i64_i32(ret_low
, ret_high
, t3
);
1199 /* calc V bit, only the sub can overflow, if t1 > t2 */
1200 tcg_gen_setcond_i64(TCG_COND_GTU
, t1
, t1
, t2
);
1201 tcg_gen_trunc_i64_i32(cpu_PSW_V
, t1
);
1202 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1204 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1205 /* Calc AV/SAV bits */
1206 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1207 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1209 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1211 tcg_temp_free_i64(t1
);
1212 tcg_temp_free_i64(t2
);
1213 tcg_temp_free_i64(t3
);
1217 gen_msubui64_d(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1220 TCGv temp
= tcg_const_i32(con
);
1221 gen_msubu64_d(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1222 tcg_temp_free(temp
);
1225 static inline void gen_addi_d(TCGv ret
, TCGv r1
, target_ulong r2
)
1227 TCGv temp
= tcg_const_i32(r2
);
1228 gen_add_d(ret
, r1
, temp
);
1229 tcg_temp_free(temp
);
1231 /* calculate the carry bit too */
1232 static inline void gen_add_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1234 TCGv t0
= tcg_temp_new_i32();
1235 TCGv result
= tcg_temp_new_i32();
1237 tcg_gen_movi_tl(t0
, 0);
1238 /* Addition and set C/V/SV bits */
1239 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, r2
, t0
);
1241 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1242 tcg_gen_xor_tl(t0
, r1
, r2
);
1243 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1245 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1246 /* Calc AV/SAV bits */
1247 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1248 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1250 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1251 /* write back result */
1252 tcg_gen_mov_tl(ret
, result
);
1254 tcg_temp_free(result
);
1258 static inline void gen_addi_CC(TCGv ret
, TCGv r1
, int32_t con
)
1260 TCGv temp
= tcg_const_i32(con
);
1261 gen_add_CC(ret
, r1
, temp
);
1262 tcg_temp_free(temp
);
1265 static inline void gen_addc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1267 TCGv carry
= tcg_temp_new_i32();
1268 TCGv t0
= tcg_temp_new_i32();
1269 TCGv result
= tcg_temp_new_i32();
1271 tcg_gen_movi_tl(t0
, 0);
1272 tcg_gen_setcondi_tl(TCG_COND_NE
, carry
, cpu_PSW_C
, 0);
1273 /* Addition, carry and set C/V/SV bits */
1274 tcg_gen_add2_i32(result
, cpu_PSW_C
, r1
, t0
, carry
, t0
);
1275 tcg_gen_add2_i32(result
, cpu_PSW_C
, result
, cpu_PSW_C
, r2
, t0
);
1277 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1278 tcg_gen_xor_tl(t0
, r1
, r2
);
1279 tcg_gen_andc_tl(cpu_PSW_V
, cpu_PSW_V
, t0
);
1281 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1282 /* Calc AV/SAV bits */
1283 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1284 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1286 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1287 /* write back result */
1288 tcg_gen_mov_tl(ret
, result
);
1290 tcg_temp_free(result
);
1292 tcg_temp_free(carry
);
1295 static inline void gen_addci_CC(TCGv ret
, TCGv r1
, int32_t con
)
1297 TCGv temp
= tcg_const_i32(con
);
1298 gen_addc_CC(ret
, r1
, temp
);
1299 tcg_temp_free(temp
);
1302 static inline void gen_cond_add(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1305 TCGv temp
= tcg_temp_new();
1306 TCGv temp2
= tcg_temp_new();
1307 TCGv result
= tcg_temp_new();
1308 TCGv mask
= tcg_temp_new();
1309 TCGv t0
= tcg_const_i32(0);
1311 /* create mask for sticky bits */
1312 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1313 tcg_gen_shli_tl(mask
, mask
, 31);
1315 tcg_gen_add_tl(result
, r1
, r2
);
1317 tcg_gen_xor_tl(temp
, result
, r1
);
1318 tcg_gen_xor_tl(temp2
, r1
, r2
);
1319 tcg_gen_andc_tl(temp
, temp
, temp2
);
1320 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1322 tcg_gen_and_tl(temp
, temp
, mask
);
1323 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1325 tcg_gen_add_tl(temp
, result
, result
);
1326 tcg_gen_xor_tl(temp
, temp
, result
);
1327 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1329 tcg_gen_and_tl(temp
, temp
, mask
);
1330 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1331 /* write back result */
1332 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1335 tcg_temp_free(temp
);
1336 tcg_temp_free(temp2
);
1337 tcg_temp_free(result
);
1338 tcg_temp_free(mask
);
1341 static inline void gen_condi_add(TCGCond cond
, TCGv r1
, int32_t r2
,
1344 TCGv temp
= tcg_const_i32(r2
);
1345 gen_cond_add(cond
, r1
, temp
, r3
, r4
);
1346 tcg_temp_free(temp
);
1349 static inline void gen_sub_d(TCGv ret
, TCGv r1
, TCGv r2
)
1351 TCGv temp
= tcg_temp_new_i32();
1352 TCGv result
= tcg_temp_new_i32();
1354 tcg_gen_sub_tl(result
, r1
, r2
);
1356 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1357 tcg_gen_xor_tl(temp
, r1
, r2
);
1358 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1360 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1362 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1363 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1365 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1366 /* write back result */
1367 tcg_gen_mov_tl(ret
, result
);
1369 tcg_temp_free(temp
);
1370 tcg_temp_free(result
);
1373 static inline void gen_sub_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1375 TCGv result
= tcg_temp_new();
1376 TCGv temp
= tcg_temp_new();
1378 tcg_gen_sub_tl(result
, r1
, r2
);
1380 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_PSW_C
, r1
, r2
);
1382 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1383 tcg_gen_xor_tl(temp
, r1
, r2
);
1384 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1386 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1388 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1389 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1391 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1392 /* write back result */
1393 tcg_gen_mov_tl(ret
, result
);
1395 tcg_temp_free(result
);
1396 tcg_temp_free(temp
);
1399 static inline void gen_subc_CC(TCGv ret
, TCGv r1
, TCGv r2
)
1401 TCGv temp
= tcg_temp_new();
1402 tcg_gen_not_tl(temp
, r2
);
1403 gen_addc_CC(ret
, r1
, temp
);
1404 tcg_temp_free(temp
);
1407 static inline void gen_cond_sub(TCGCond cond
, TCGv r1
, TCGv r2
, TCGv r3
,
1410 TCGv temp
= tcg_temp_new();
1411 TCGv temp2
= tcg_temp_new();
1412 TCGv result
= tcg_temp_new();
1413 TCGv mask
= tcg_temp_new();
1414 TCGv t0
= tcg_const_i32(0);
1416 /* create mask for sticky bits */
1417 tcg_gen_setcond_tl(cond
, mask
, r4
, t0
);
1418 tcg_gen_shli_tl(mask
, mask
, 31);
1420 tcg_gen_sub_tl(result
, r1
, r2
);
1422 tcg_gen_xor_tl(temp
, result
, r1
);
1423 tcg_gen_xor_tl(temp2
, r1
, r2
);
1424 tcg_gen_and_tl(temp
, temp
, temp2
);
1425 tcg_gen_movcond_tl(cond
, cpu_PSW_V
, r4
, t0
, temp
, cpu_PSW_V
);
1427 tcg_gen_and_tl(temp
, temp
, mask
);
1428 tcg_gen_or_tl(cpu_PSW_SV
, temp
, cpu_PSW_SV
);
1430 tcg_gen_add_tl(temp
, result
, result
);
1431 tcg_gen_xor_tl(temp
, temp
, result
);
1432 tcg_gen_movcond_tl(cond
, cpu_PSW_AV
, r4
, t0
, temp
, cpu_PSW_AV
);
1434 tcg_gen_and_tl(temp
, temp
, mask
);
1435 tcg_gen_or_tl(cpu_PSW_SAV
, temp
, cpu_PSW_SAV
);
1436 /* write back result */
1437 tcg_gen_movcond_tl(cond
, r3
, r4
, t0
, result
, r1
);
1440 tcg_temp_free(temp
);
1441 tcg_temp_free(temp2
);
1442 tcg_temp_free(result
);
1443 tcg_temp_free(mask
);
1446 static inline void gen_abs(TCGv ret
, TCGv r1
)
1448 TCGv temp
= tcg_temp_new();
1449 TCGv t0
= tcg_const_i32(0);
1451 tcg_gen_neg_tl(temp
, r1
);
1452 tcg_gen_movcond_tl(TCG_COND_GE
, ret
, r1
, t0
, r1
, temp
);
1453 /* overflow can only happen, if r1 = 0x80000000 */
1454 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, r1
, 0x80000000);
1455 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1457 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1459 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1460 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1462 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1464 tcg_temp_free(temp
);
1468 static inline void gen_absdif(TCGv ret
, TCGv r1
, TCGv r2
)
1470 TCGv temp
= tcg_temp_new_i32();
1471 TCGv result
= tcg_temp_new_i32();
1473 tcg_gen_sub_tl(result
, r1
, r2
);
1474 tcg_gen_sub_tl(temp
, r2
, r1
);
1475 tcg_gen_movcond_tl(TCG_COND_GT
, result
, r1
, r2
, result
, temp
);
1478 tcg_gen_xor_tl(cpu_PSW_V
, result
, r1
);
1479 tcg_gen_xor_tl(temp
, result
, r2
);
1480 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_PSW_V
, r1
, r2
, cpu_PSW_V
, temp
);
1481 tcg_gen_xor_tl(temp
, r1
, r2
);
1482 tcg_gen_and_tl(cpu_PSW_V
, cpu_PSW_V
, temp
);
1484 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1486 tcg_gen_add_tl(cpu_PSW_AV
, result
, result
);
1487 tcg_gen_xor_tl(cpu_PSW_AV
, result
, cpu_PSW_AV
);
1489 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1490 /* write back result */
1491 tcg_gen_mov_tl(ret
, result
);
1493 tcg_temp_free(temp
);
1494 tcg_temp_free(result
);
1497 static inline void gen_absdifi(TCGv ret
, TCGv r1
, int32_t con
)
1499 TCGv temp
= tcg_const_i32(con
);
1500 gen_absdif(ret
, r1
, temp
);
1501 tcg_temp_free(temp
);
1504 static inline void gen_absdifsi(TCGv ret
, TCGv r1
, int32_t con
)
1506 TCGv temp
= tcg_const_i32(con
);
1507 gen_helper_absdif_ssov(ret
, cpu_env
, r1
, temp
);
1508 tcg_temp_free(temp
);
1511 static inline void gen_mul_i32s(TCGv ret
, TCGv r1
, TCGv r2
)
1513 TCGv high
= tcg_temp_new();
1514 TCGv low
= tcg_temp_new();
1516 tcg_gen_muls2_tl(low
, high
, r1
, r2
);
1517 tcg_gen_mov_tl(ret
, low
);
1519 tcg_gen_sari_tl(low
, low
, 31);
1520 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_PSW_V
, high
, low
);
1521 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1523 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1525 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1526 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1528 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1530 tcg_temp_free(high
);
1534 static inline void gen_muli_i32s(TCGv ret
, TCGv r1
, int32_t con
)
1536 TCGv temp
= tcg_const_i32(con
);
1537 gen_mul_i32s(ret
, r1
, temp
);
1538 tcg_temp_free(temp
);
1541 static inline void gen_mul_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
1543 tcg_gen_muls2_tl(ret_low
, ret_high
, r1
, r2
);
1545 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1547 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1549 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1550 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1552 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1555 static inline void gen_muli_i64s(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
1558 TCGv temp
= tcg_const_i32(con
);
1559 gen_mul_i64s(ret_low
, ret_high
, r1
, temp
);
1560 tcg_temp_free(temp
);
1563 static inline void gen_mul_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2
)
1565 tcg_gen_mulu2_tl(ret_low
, ret_high
, r1
, r2
);
1567 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1569 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1571 tcg_gen_add_tl(cpu_PSW_AV
, ret_high
, ret_high
);
1572 tcg_gen_xor_tl(cpu_PSW_AV
, ret_high
, cpu_PSW_AV
);
1574 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1577 static inline void gen_muli_i64u(TCGv ret_low
, TCGv ret_high
, TCGv r1
,
1580 TCGv temp
= tcg_const_i32(con
);
1581 gen_mul_i64u(ret_low
, ret_high
, r1
, temp
);
1582 tcg_temp_free(temp
);
1585 static inline void gen_mulsi_i32(TCGv ret
, TCGv r1
, int32_t con
)
1587 TCGv temp
= tcg_const_i32(con
);
1588 gen_helper_mul_ssov(ret
, cpu_env
, r1
, temp
);
1589 tcg_temp_free(temp
);
1592 static inline void gen_mulsui_i32(TCGv ret
, TCGv r1
, int32_t con
)
1594 TCGv temp
= tcg_const_i32(con
);
1595 gen_helper_mul_suov(ret
, cpu_env
, r1
, temp
);
1596 tcg_temp_free(temp
);
1598 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
1599 static inline void gen_maddsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1601 TCGv temp
= tcg_const_i32(con
);
1602 gen_helper_madd32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
1603 tcg_temp_free(temp
);
1606 static inline void gen_maddsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1608 TCGv temp
= tcg_const_i32(con
);
1609 gen_helper_madd32_suov(ret
, cpu_env
, r1
, r2
, temp
);
1610 tcg_temp_free(temp
);
1614 gen_mul_q(TCGv rl
, TCGv rh
, TCGv arg1
, TCGv arg2
, uint32_t n
, uint32_t up_shift
)
1616 TCGv temp
= tcg_temp_new();
1617 TCGv_i64 temp_64
= tcg_temp_new_i64();
1618 TCGv_i64 temp2_64
= tcg_temp_new_i64();
1621 if (up_shift
== 32) {
1622 tcg_gen_muls2_tl(rh
, rl
, arg1
, arg2
);
1623 } else if (up_shift
== 16) {
1624 tcg_gen_ext_i32_i64(temp_64
, arg1
);
1625 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
1627 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
1628 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
);
1629 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
1631 tcg_gen_muls2_tl(rl
, rh
, arg1
, arg2
);
1634 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1635 } else { /* n is exspected to be 1 */
1636 tcg_gen_ext_i32_i64(temp_64
, arg1
);
1637 tcg_gen_ext_i32_i64(temp2_64
, arg2
);
1639 tcg_gen_mul_i64(temp_64
, temp_64
, temp2_64
);
1641 if (up_shift
== 0) {
1642 tcg_gen_shli_i64(temp_64
, temp_64
, 1);
1644 tcg_gen_shri_i64(temp_64
, temp_64
, up_shift
- 1);
1646 tcg_gen_extr_i64_i32(rl
, rh
, temp_64
);
1647 /* overflow only occours if r1 = r2 = 0x8000 */
1648 if (up_shift
== 0) {/* result is 64 bit */
1649 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rh
,
1651 } else { /* result is 32 bit */
1652 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, rl
,
1655 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1656 /* calc sv overflow bit */
1657 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
1659 /* calc av overflow bit */
1660 if (up_shift
== 0) {
1661 tcg_gen_add_tl(cpu_PSW_AV
, rh
, rh
);
1662 tcg_gen_xor_tl(cpu_PSW_AV
, rh
, cpu_PSW_AV
);
1664 tcg_gen_add_tl(cpu_PSW_AV
, rl
, rl
);
1665 tcg_gen_xor_tl(cpu_PSW_AV
, rl
, cpu_PSW_AV
);
1667 /* calc sav overflow bit */
1668 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1669 tcg_temp_free(temp
);
1670 tcg_temp_free_i64(temp_64
);
1671 tcg_temp_free_i64(temp2_64
);
1675 gen_mul_q_16(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
1677 TCGv temp
= tcg_temp_new();
1679 tcg_gen_mul_tl(ret
, arg1
, arg2
);
1680 } else { /* n is exspected to be 1 */
1681 tcg_gen_mul_tl(ret
, arg1
, arg2
);
1682 tcg_gen_shli_tl(ret
, ret
, 1);
1683 /* catch special case r1 = r2 = 0x8000 */
1684 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80000000);
1685 tcg_gen_sub_tl(ret
, ret
, temp
);
1688 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1689 /* calc av overflow bit */
1690 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1691 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1692 /* calc sav overflow bit */
1693 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1695 tcg_temp_free(temp
);
1698 static void gen_mulr_q(TCGv ret
, TCGv arg1
, TCGv arg2
, uint32_t n
)
1700 TCGv temp
= tcg_temp_new();
1702 tcg_gen_mul_tl(ret
, arg1
, arg2
);
1703 tcg_gen_addi_tl(ret
, ret
, 0x8000);
1705 tcg_gen_mul_tl(ret
, arg1
, arg2
);
1706 tcg_gen_shli_tl(ret
, ret
, 1);
1707 tcg_gen_addi_tl(ret
, ret
, 0x8000);
1708 /* catch special case r1 = r2 = 0x8000 */
1709 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, ret
, 0x80008000);
1710 tcg_gen_muli_tl(temp
, temp
, 0x8001);
1711 tcg_gen_sub_tl(ret
, ret
, temp
);
1714 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1715 /* calc av overflow bit */
1716 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1717 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1718 /* calc sav overflow bit */
1719 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1720 /* cut halfword off */
1721 tcg_gen_andi_tl(ret
, ret
, 0xffff0000);
1723 tcg_temp_free(temp
);
1727 gen_madds_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1730 TCGv_i64 temp64
= tcg_temp_new_i64();
1731 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
1732 gen_helper_madd64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
1733 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1734 tcg_temp_free_i64(temp64
);
1738 gen_maddsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1741 TCGv temp
= tcg_const_i32(con
);
1742 gen_madds_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1743 tcg_temp_free(temp
);
1747 gen_maddsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1750 TCGv_i64 temp64
= tcg_temp_new_i64();
1751 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
1752 gen_helper_madd64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
1753 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1754 tcg_temp_free_i64(temp64
);
1758 gen_maddsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1761 TCGv temp
= tcg_const_i32(con
);
1762 gen_maddsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1763 tcg_temp_free(temp
);
1766 static inline void gen_msubsi_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1768 TCGv temp
= tcg_const_i32(con
);
1769 gen_helper_msub32_ssov(ret
, cpu_env
, r1
, r2
, temp
);
1770 tcg_temp_free(temp
);
1773 static inline void gen_msubsui_32(TCGv ret
, TCGv r1
, TCGv r2
, int32_t con
)
1775 TCGv temp
= tcg_const_i32(con
);
1776 gen_helper_msub32_suov(ret
, cpu_env
, r1
, r2
, temp
);
1777 tcg_temp_free(temp
);
1781 gen_msubs_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1784 TCGv_i64 temp64
= tcg_temp_new_i64();
1785 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
1786 gen_helper_msub64_ssov(temp64
, cpu_env
, r1
, temp64
, r3
);
1787 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1788 tcg_temp_free_i64(temp64
);
1792 gen_msubsi_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1795 TCGv temp
= tcg_const_i32(con
);
1796 gen_msubs_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1797 tcg_temp_free(temp
);
1801 gen_msubsu_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1804 TCGv_i64 temp64
= tcg_temp_new_i64();
1805 tcg_gen_concat_i32_i64(temp64
, r2_low
, r2_high
);
1806 gen_helper_msub64_suov(temp64
, cpu_env
, r1
, temp64
, r3
);
1807 tcg_gen_extr_i64_i32(ret_low
, ret_high
, temp64
);
1808 tcg_temp_free_i64(temp64
);
1812 gen_msubsui_64(TCGv ret_low
, TCGv ret_high
, TCGv r1
, TCGv r2_low
, TCGv r2_high
,
1815 TCGv temp
= tcg_const_i32(con
);
1816 gen_msubsu_64(ret_low
, ret_high
, r1
, r2_low
, r2_high
, temp
);
1817 tcg_temp_free(temp
);
1820 static void gen_saturate(TCGv ret
, TCGv arg
, int32_t up
, int32_t low
)
1822 TCGv sat_neg
= tcg_const_i32(low
);
1823 TCGv temp
= tcg_const_i32(up
);
1825 /* sat_neg = (arg < low ) ? low : arg; */
1826 tcg_gen_movcond_tl(TCG_COND_LT
, sat_neg
, arg
, sat_neg
, sat_neg
, arg
);
1828 /* ret = (sat_neg > up ) ? up : sat_neg; */
1829 tcg_gen_movcond_tl(TCG_COND_GT
, ret
, sat_neg
, temp
, temp
, sat_neg
);
1831 tcg_temp_free(sat_neg
);
1832 tcg_temp_free(temp
);
1835 static void gen_saturate_u(TCGv ret
, TCGv arg
, int32_t up
)
1837 TCGv temp
= tcg_const_i32(up
);
1838 /* sat_neg = (arg > up ) ? up : arg; */
1839 tcg_gen_movcond_tl(TCG_COND_GTU
, ret
, arg
, temp
, temp
, arg
);
1840 tcg_temp_free(temp
);
1843 static void gen_shi(TCGv ret
, TCGv r1
, int32_t shift_count
)
1845 if (shift_count
== -32) {
1846 tcg_gen_movi_tl(ret
, 0);
1847 } else if (shift_count
>= 0) {
1848 tcg_gen_shli_tl(ret
, r1
, shift_count
);
1850 tcg_gen_shri_tl(ret
, r1
, -shift_count
);
1854 static void gen_sh_hi(TCGv ret
, TCGv r1
, int32_t shiftcount
)
1856 TCGv temp_low
, temp_high
;
1858 if (shiftcount
== -16) {
1859 tcg_gen_movi_tl(ret
, 0);
1861 temp_high
= tcg_temp_new();
1862 temp_low
= tcg_temp_new();
1864 tcg_gen_andi_tl(temp_low
, r1
, 0xffff);
1865 tcg_gen_andi_tl(temp_high
, r1
, 0xffff0000);
1866 gen_shi(temp_low
, temp_low
, shiftcount
);
1867 gen_shi(ret
, temp_high
, shiftcount
);
1868 tcg_gen_deposit_tl(ret
, ret
, temp_low
, 0, 16);
1870 tcg_temp_free(temp_low
);
1871 tcg_temp_free(temp_high
);
1875 static void gen_shaci(TCGv ret
, TCGv r1
, int32_t shift_count
)
1877 uint32_t msk
, msk_start
;
1878 TCGv temp
= tcg_temp_new();
1879 TCGv temp2
= tcg_temp_new();
1880 TCGv t_0
= tcg_const_i32(0);
1882 if (shift_count
== 0) {
1883 /* Clear PSW.C and PSW.V */
1884 tcg_gen_movi_tl(cpu_PSW_C
, 0);
1885 tcg_gen_mov_tl(cpu_PSW_V
, cpu_PSW_C
);
1886 tcg_gen_mov_tl(ret
, r1
);
1887 } else if (shift_count
== -32) {
1889 tcg_gen_mov_tl(cpu_PSW_C
, r1
);
1890 /* fill ret completly with sign bit */
1891 tcg_gen_sari_tl(ret
, r1
, 31);
1893 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1894 } else if (shift_count
> 0) {
1895 TCGv t_max
= tcg_const_i32(0x7FFFFFFF >> shift_count
);
1896 TCGv t_min
= tcg_const_i32(((int32_t) -0x80000000) >> shift_count
);
1899 msk_start
= 32 - shift_count
;
1900 msk
= ((1 << shift_count
) - 1) << msk_start
;
1901 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
1902 /* calc v/sv bits */
1903 tcg_gen_setcond_tl(TCG_COND_GT
, temp
, r1
, t_max
);
1904 tcg_gen_setcond_tl(TCG_COND_LT
, temp2
, r1
, t_min
);
1905 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
1906 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
1908 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_V
, cpu_PSW_SV
);
1910 tcg_gen_shli_tl(ret
, r1
, shift_count
);
1912 tcg_temp_free(t_max
);
1913 tcg_temp_free(t_min
);
1916 tcg_gen_movi_tl(cpu_PSW_V
, 0);
1918 msk
= (1 << -shift_count
) - 1;
1919 tcg_gen_andi_tl(cpu_PSW_C
, r1
, msk
);
1921 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
1923 /* calc av overflow bit */
1924 tcg_gen_add_tl(cpu_PSW_AV
, ret
, ret
);
1925 tcg_gen_xor_tl(cpu_PSW_AV
, ret
, cpu_PSW_AV
);
1926 /* calc sav overflow bit */
1927 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
1929 tcg_temp_free(temp
);
1930 tcg_temp_free(temp2
);
1934 static void gen_shas(TCGv ret
, TCGv r1
, TCGv r2
)
1936 gen_helper_sha_ssov(ret
, cpu_env
, r1
, r2
);
1939 static void gen_shasi(TCGv ret
, TCGv r1
, int32_t con
)
1941 TCGv temp
= tcg_const_i32(con
);
1942 gen_shas(ret
, r1
, temp
);
1943 tcg_temp_free(temp
);
1946 static void gen_sha_hi(TCGv ret
, TCGv r1
, int32_t shift_count
)
1950 if (shift_count
== 0) {
1951 tcg_gen_mov_tl(ret
, r1
);
1952 } else if (shift_count
> 0) {
1953 low
= tcg_temp_new();
1954 high
= tcg_temp_new();
1956 tcg_gen_andi_tl(high
, r1
, 0xffff0000);
1957 tcg_gen_shli_tl(low
, r1
, shift_count
);
1958 tcg_gen_shli_tl(ret
, high
, shift_count
);
1959 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
1962 tcg_temp_free(high
);
1964 low
= tcg_temp_new();
1965 high
= tcg_temp_new();
1967 tcg_gen_ext16s_tl(low
, r1
);
1968 tcg_gen_sari_tl(low
, low
, -shift_count
);
1969 tcg_gen_sari_tl(ret
, r1
, -shift_count
);
1970 tcg_gen_deposit_tl(ret
, ret
, low
, 0, 16);
1973 tcg_temp_free(high
);
1978 /* ret = {ret[30:0], (r1 cond r2)}; */
1979 static void gen_sh_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
)
1981 TCGv temp
= tcg_temp_new();
1982 TCGv temp2
= tcg_temp_new();
1984 tcg_gen_shli_tl(temp
, ret
, 1);
1985 tcg_gen_setcond_tl(cond
, temp2
, r1
, r2
);
1986 tcg_gen_or_tl(ret
, temp
, temp2
);
1988 tcg_temp_free(temp
);
1989 tcg_temp_free(temp2
);
1992 static void gen_sh_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
)
1994 TCGv temp
= tcg_const_i32(con
);
1995 gen_sh_cond(cond
, ret
, r1
, temp
);
1996 tcg_temp_free(temp
);
1999 static inline void gen_adds(TCGv ret
, TCGv r1
, TCGv r2
)
2001 gen_helper_add_ssov(ret
, cpu_env
, r1
, r2
);
2004 static inline void gen_addsi(TCGv ret
, TCGv r1
, int32_t con
)
2006 TCGv temp
= tcg_const_i32(con
);
2007 gen_helper_add_ssov(ret
, cpu_env
, r1
, temp
);
2008 tcg_temp_free(temp
);
2011 static inline void gen_addsui(TCGv ret
, TCGv r1
, int32_t con
)
2013 TCGv temp
= tcg_const_i32(con
);
2014 gen_helper_add_suov(ret
, cpu_env
, r1
, temp
);
2015 tcg_temp_free(temp
);
2018 static inline void gen_subs(TCGv ret
, TCGv r1
, TCGv r2
)
2020 gen_helper_sub_ssov(ret
, cpu_env
, r1
, r2
);
2023 static inline void gen_subsu(TCGv ret
, TCGv r1
, TCGv r2
)
2025 gen_helper_sub_suov(ret
, cpu_env
, r1
, r2
);
2028 static inline void gen_bit_2op(TCGv ret
, TCGv r1
, TCGv r2
,
2030 void(*op1
)(TCGv
, TCGv
, TCGv
),
2031 void(*op2
)(TCGv
, TCGv
, TCGv
))
2035 temp1
= tcg_temp_new();
2036 temp2
= tcg_temp_new();
2038 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2039 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2041 (*op1
)(temp1
, temp1
, temp2
);
2042 (*op2
)(temp1
, ret
, temp1
);
2044 tcg_gen_deposit_tl(ret
, ret
, temp1
, 0, 1);
2046 tcg_temp_free(temp1
);
2047 tcg_temp_free(temp2
);
2050 /* ret = r1[pos1] op1 r2[pos2]; */
2051 static inline void gen_bit_1op(TCGv ret
, TCGv r1
, TCGv r2
,
2053 void(*op1
)(TCGv
, TCGv
, TCGv
))
2057 temp1
= tcg_temp_new();
2058 temp2
= tcg_temp_new();
2060 tcg_gen_shri_tl(temp2
, r2
, pos2
);
2061 tcg_gen_shri_tl(temp1
, r1
, pos1
);
2063 (*op1
)(ret
, temp1
, temp2
);
2065 tcg_gen_andi_tl(ret
, ret
, 0x1);
2067 tcg_temp_free(temp1
);
2068 tcg_temp_free(temp2
);
2071 static inline void gen_accumulating_cond(int cond
, TCGv ret
, TCGv r1
, TCGv r2
,
2072 void(*op
)(TCGv
, TCGv
, TCGv
))
2074 TCGv temp
= tcg_temp_new();
2075 TCGv temp2
= tcg_temp_new();
2076 /* temp = (arg1 cond arg2 )*/
2077 tcg_gen_setcond_tl(cond
, temp
, r1
, r2
);
2079 tcg_gen_andi_tl(temp2
, ret
, 0x1);
2080 /* temp = temp insn temp2 */
2081 (*op
)(temp
, temp
, temp2
);
2082 /* ret = {ret[31:1], temp} */
2083 tcg_gen_deposit_tl(ret
, ret
, temp
, 0, 1);
2085 tcg_temp_free(temp
);
2086 tcg_temp_free(temp2
);
2090 gen_accumulating_condi(int cond
, TCGv ret
, TCGv r1
, int32_t con
,
2091 void(*op
)(TCGv
, TCGv
, TCGv
))
2093 TCGv temp
= tcg_const_i32(con
);
2094 gen_accumulating_cond(cond
, ret
, r1
, temp
, op
);
2095 tcg_temp_free(temp
);
2098 /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
2099 static inline void gen_cond_w(TCGCond cond
, TCGv ret
, TCGv r1
, TCGv r2
)
2101 tcg_gen_setcond_tl(cond
, ret
, r1
, r2
);
2102 tcg_gen_neg_tl(ret
, ret
);
2105 static inline void gen_eqany_bi(TCGv ret
, TCGv r1
, int32_t con
)
2107 TCGv b0
= tcg_temp_new();
2108 TCGv b1
= tcg_temp_new();
2109 TCGv b2
= tcg_temp_new();
2110 TCGv b3
= tcg_temp_new();
2113 tcg_gen_andi_tl(b0
, r1
, 0xff);
2114 tcg_gen_setcondi_tl(TCG_COND_EQ
, b0
, b0
, con
& 0xff);
2117 tcg_gen_andi_tl(b1
, r1
, 0xff00);
2118 tcg_gen_setcondi_tl(TCG_COND_EQ
, b1
, b1
, con
& 0xff00);
2121 tcg_gen_andi_tl(b2
, r1
, 0xff0000);
2122 tcg_gen_setcondi_tl(TCG_COND_EQ
, b2
, b2
, con
& 0xff0000);
2125 tcg_gen_andi_tl(b3
, r1
, 0xff000000);
2126 tcg_gen_setcondi_tl(TCG_COND_EQ
, b3
, b3
, con
& 0xff000000);
2129 tcg_gen_or_tl(ret
, b0
, b1
);
2130 tcg_gen_or_tl(ret
, ret
, b2
);
2131 tcg_gen_or_tl(ret
, ret
, b3
);
2139 static inline void gen_eqany_hi(TCGv ret
, TCGv r1
, int32_t con
)
2141 TCGv h0
= tcg_temp_new();
2142 TCGv h1
= tcg_temp_new();
2145 tcg_gen_andi_tl(h0
, r1
, 0xffff);
2146 tcg_gen_setcondi_tl(TCG_COND_EQ
, h0
, h0
, con
& 0xffff);
2149 tcg_gen_andi_tl(h1
, r1
, 0xffff0000);
2150 tcg_gen_setcondi_tl(TCG_COND_EQ
, h1
, h1
, con
& 0xffff0000);
2153 tcg_gen_or_tl(ret
, h0
, h1
);
2158 /* mask = ((1 << width) -1) << pos;
2159 ret = (r1 & ~mask) | (r2 << pos) & mask); */
2160 static inline void gen_insert(TCGv ret
, TCGv r1
, TCGv r2
, TCGv width
, TCGv pos
)
2162 TCGv mask
= tcg_temp_new();
2163 TCGv temp
= tcg_temp_new();
2164 TCGv temp2
= tcg_temp_new();
2166 tcg_gen_movi_tl(mask
, 1);
2167 tcg_gen_shl_tl(mask
, mask
, width
);
2168 tcg_gen_subi_tl(mask
, mask
, 1);
2169 tcg_gen_shl_tl(mask
, mask
, pos
);
2171 tcg_gen_shl_tl(temp
, r2
, pos
);
2172 tcg_gen_and_tl(temp
, temp
, mask
);
2173 tcg_gen_andc_tl(temp2
, r1
, mask
);
2174 tcg_gen_or_tl(ret
, temp
, temp2
);
2176 tcg_temp_free(mask
);
2177 tcg_temp_free(temp
);
2178 tcg_temp_free(temp2
);
2181 static inline void gen_bsplit(TCGv rl
, TCGv rh
, TCGv r1
)
2183 TCGv_i64 temp
= tcg_temp_new_i64();
2185 gen_helper_bsplit(temp
, r1
);
2186 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2188 tcg_temp_free_i64(temp
);
2191 static inline void gen_unpack(TCGv rl
, TCGv rh
, TCGv r1
)
2193 TCGv_i64 temp
= tcg_temp_new_i64();
2195 gen_helper_unpack(temp
, r1
);
2196 tcg_gen_extr_i64_i32(rl
, rh
, temp
);
2198 tcg_temp_free_i64(temp
);
2202 gen_dvinit_b(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2204 TCGv_i64 ret
= tcg_temp_new_i64();
2206 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
2207 gen_helper_dvinit_b_13(ret
, cpu_env
, r1
, r2
);
2209 gen_helper_dvinit_b_131(ret
, cpu_env
, r1
, r2
);
2211 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2213 tcg_temp_free_i64(ret
);
2217 gen_dvinit_h(CPUTriCoreState
*env
, TCGv rl
, TCGv rh
, TCGv r1
, TCGv r2
)
2219 TCGv_i64 ret
= tcg_temp_new_i64();
2221 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
2222 gen_helper_dvinit_h_13(ret
, cpu_env
, r1
, r2
);
2224 gen_helper_dvinit_h_131(ret
, cpu_env
, r1
, r2
);
2226 tcg_gen_extr_i64_i32(rl
, rh
, ret
);
2228 tcg_temp_free_i64(ret
);
2231 static void gen_calc_usb_mul_h(TCGv arg_low
, TCGv arg_high
)
2233 TCGv temp
= tcg_temp_new();
2235 tcg_gen_add_tl(temp
, arg_low
, arg_low
);
2236 tcg_gen_xor_tl(temp
, temp
, arg_low
);
2237 tcg_gen_add_tl(cpu_PSW_AV
, arg_high
, arg_high
);
2238 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_PSW_AV
, arg_high
);
2239 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2241 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2242 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2243 tcg_temp_free(temp
);
2246 static void gen_calc_usb_mulr_h(TCGv arg
)
2248 TCGv temp
= tcg_temp_new();
2250 tcg_gen_add_tl(temp
, arg
, arg
);
2251 tcg_gen_xor_tl(temp
, temp
, arg
);
2252 tcg_gen_shli_tl(cpu_PSW_AV
, temp
, 16);
2253 tcg_gen_or_tl(cpu_PSW_AV
, cpu_PSW_AV
, temp
);
2255 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2257 tcg_gen_movi_tl(cpu_PSW_V
, 0);
2258 tcg_temp_free(temp
);
2261 /* helpers for generating program flow micro-ops */
2263 static inline void gen_save_pc(target_ulong pc
)
2265 tcg_gen_movi_tl(cpu_PC
, pc
);
2268 static inline void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
2270 TranslationBlock
*tb
;
2272 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
2273 likely(!ctx
->singlestep_enabled
)) {
2276 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
2279 if (ctx
->singlestep_enabled
) {
2280 /* raise exception debug */
2286 static inline void gen_branch_cond(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2287 TCGv r2
, int16_t address
)
2290 jumpLabel
= gen_new_label();
2291 tcg_gen_brcond_tl(cond
, r1
, r2
, jumpLabel
);
2293 gen_goto_tb(ctx
, 1, ctx
->next_pc
);
2295 gen_set_label(jumpLabel
);
2296 gen_goto_tb(ctx
, 0, ctx
->pc
+ address
* 2);
2299 static inline void gen_branch_condi(DisasContext
*ctx
, TCGCond cond
, TCGv r1
,
2300 int r2
, int16_t address
)
2302 TCGv temp
= tcg_const_i32(r2
);
2303 gen_branch_cond(ctx
, cond
, r1
, temp
, address
);
2304 tcg_temp_free(temp
);
2307 static void gen_loop(DisasContext
*ctx
, int r1
, int32_t offset
)
2310 l1
= gen_new_label();
2312 tcg_gen_subi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], 1);
2313 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_gpr_a
[r1
], -1, l1
);
2314 gen_goto_tb(ctx
, 1, ctx
->pc
+ offset
);
2316 gen_goto_tb(ctx
, 0, ctx
->next_pc
);
2319 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
2320 int r2
, int32_t constant
, int32_t offset
)
2326 /* SB-format jumps */
2329 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2331 case OPC1_32_B_CALL
:
2332 case OPC1_16_SB_CALL
:
2333 gen_helper_1arg(call
, ctx
->next_pc
);
2334 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2337 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
2339 case OPC1_16_SB_JNZ
:
2340 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
2342 /* SBC-format jumps */
2343 case OPC1_16_SBC_JEQ
:
2344 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
2346 case OPC1_16_SBC_JNE
:
2347 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
2349 /* SBRN-format jumps */
2350 case OPC1_16_SBRN_JZ_T
:
2351 temp
= tcg_temp_new();
2352 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2353 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2354 tcg_temp_free(temp
);
2356 case OPC1_16_SBRN_JNZ_T
:
2357 temp
= tcg_temp_new();
2358 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2359 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2360 tcg_temp_free(temp
);
2362 /* SBR-format jumps */
2363 case OPC1_16_SBR_JEQ
:
2364 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2367 case OPC1_16_SBR_JNE
:
2368 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2371 case OPC1_16_SBR_JNZ
:
2372 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
2374 case OPC1_16_SBR_JNZ_A
:
2375 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2377 case OPC1_16_SBR_JGEZ
:
2378 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
2380 case OPC1_16_SBR_JGTZ
:
2381 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
2383 case OPC1_16_SBR_JLEZ
:
2384 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
2386 case OPC1_16_SBR_JLTZ
:
2387 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
2389 case OPC1_16_SBR_JZ
:
2390 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
2392 case OPC1_16_SBR_JZ_A
:
2393 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2395 case OPC1_16_SBR_LOOP
:
2396 gen_loop(ctx
, r1
, offset
* 2 - 32);
2398 /* SR-format jumps */
2400 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
2403 case OPC2_16_SR_RET
:
2404 gen_helper_ret(cpu_env
);
2408 case OPC1_32_B_CALLA
:
2409 gen_helper_1arg(call
, ctx
->next_pc
);
2410 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
2413 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
2416 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
2419 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
2420 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2423 case OPCM_32_BRC_EQ_NEQ
:
2424 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
2425 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
2427 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
2430 case OPCM_32_BRC_GE
:
2431 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
2432 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
2434 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
2435 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
2439 case OPCM_32_BRC_JLT
:
2440 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
2441 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
2443 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
2444 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
2448 case OPCM_32_BRC_JNE
:
2449 temp
= tcg_temp_new();
2450 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
2451 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2452 /* subi is unconditional */
2453 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2454 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
2456 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2457 /* addi is unconditional */
2458 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2459 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
2461 tcg_temp_free(temp
);
2464 case OPCM_32_BRN_JTT
:
2465 n
= MASK_OP_BRN_N(ctx
->opcode
);
2467 temp
= tcg_temp_new();
2468 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
2470 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
2471 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2473 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2475 tcg_temp_free(temp
);
2478 case OPCM_32_BRR_EQ_NEQ
:
2479 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
2480 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2483 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2487 case OPCM_32_BRR_ADDR_EQ_NEQ
:
2488 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
2489 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
2492 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
2496 case OPCM_32_BRR_GE
:
2497 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
2498 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2501 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2505 case OPCM_32_BRR_JLT
:
2506 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
2507 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2510 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
2514 case OPCM_32_BRR_LOOP
:
2515 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
2516 gen_loop(ctx
, r1
, offset
* 2);
2518 /* OPC2_32_BRR_LOOPU */
2519 gen_goto_tb(ctx
, 0, ctx
->pc
+ offset
* 2);
2522 case OPCM_32_BRR_JNE
:
2523 temp
= tcg_temp_new();
2524 temp2
= tcg_temp_new();
2525 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
2526 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2527 /* also save r2, in case of r1 == r2, so r2 is not decremented */
2528 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
2529 /* subi is unconditional */
2530 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2531 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
2533 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
2534 /* also save r2, in case of r1 == r2, so r2 is not decremented */
2535 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
2536 /* addi is unconditional */
2537 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
2538 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
2540 tcg_temp_free(temp
);
2541 tcg_temp_free(temp2
);
2543 case OPCM_32_BRR_JNZ
:
2544 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
2545 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2547 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2551 printf("Branch Error at %x\n", ctx
->pc
);
2553 ctx
->bstate
= BS_BRANCH
;
2558 * Functions for decoding instructions
2561 static void decode_src_opc(DisasContext
*ctx
, int op1
)
2567 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
2568 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
2571 case OPC1_16_SRC_ADD
:
2572 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
2574 case OPC1_16_SRC_ADD_A15
:
2575 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
2577 case OPC1_16_SRC_ADD_15A
:
2578 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
2580 case OPC1_16_SRC_ADD_A
:
2581 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
2583 case OPC1_16_SRC_CADD
:
2584 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
2587 case OPC1_16_SRC_CADDN
:
2588 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
2591 case OPC1_16_SRC_CMOV
:
2592 temp
= tcg_const_tl(0);
2593 temp2
= tcg_const_tl(const4
);
2594 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
2595 temp2
, cpu_gpr_d
[r1
]);
2596 tcg_temp_free(temp
);
2597 tcg_temp_free(temp2
);
2599 case OPC1_16_SRC_CMOVN
:
2600 temp
= tcg_const_tl(0);
2601 temp2
= tcg_const_tl(const4
);
2602 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
2603 temp2
, cpu_gpr_d
[r1
]);
2604 tcg_temp_free(temp
);
2605 tcg_temp_free(temp2
);
2607 case OPC1_16_SRC_EQ
:
2608 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
2611 case OPC1_16_SRC_LT
:
2612 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
2615 case OPC1_16_SRC_MOV
:
2616 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
2618 case OPC1_16_SRC_MOV_A
:
2619 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
2620 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
2622 case OPC1_16_SRC_SH
:
2623 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
2625 case OPC1_16_SRC_SHA
:
2626 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
2631 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
2636 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
2637 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
2640 case OPC1_16_SRR_ADD
:
2641 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2643 case OPC1_16_SRR_ADD_A15
:
2644 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
2646 case OPC1_16_SRR_ADD_15A
:
2647 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2649 case OPC1_16_SRR_ADD_A
:
2650 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
2652 case OPC1_16_SRR_ADDS
:
2653 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2655 case OPC1_16_SRR_AND
:
2656 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2658 case OPC1_16_SRR_CMOV
:
2659 temp
= tcg_const_tl(0);
2660 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
2661 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
2662 tcg_temp_free(temp
);
2664 case OPC1_16_SRR_CMOVN
:
2665 temp
= tcg_const_tl(0);
2666 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
2667 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
2668 tcg_temp_free(temp
);
2670 case OPC1_16_SRR_EQ
:
2671 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
2674 case OPC1_16_SRR_LT
:
2675 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
2678 case OPC1_16_SRR_MOV
:
2679 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2681 case OPC1_16_SRR_MOV_A
:
2682 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
2684 case OPC1_16_SRR_MOV_AA
:
2685 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
2687 case OPC1_16_SRR_MOV_D
:
2688 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
2690 case OPC1_16_SRR_MUL
:
2691 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2693 case OPC1_16_SRR_OR
:
2694 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2696 case OPC1_16_SRR_SUB
:
2697 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2699 case OPC1_16_SRR_SUB_A15B
:
2700 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
2702 case OPC1_16_SRR_SUB_15AB
:
2703 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2705 case OPC1_16_SRR_SUBS
:
2706 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2708 case OPC1_16_SRR_XOR
:
2709 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
2714 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
2718 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
2719 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
2722 case OPC1_16_SSR_ST_A
:
2723 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
2725 case OPC1_16_SSR_ST_A_POSTINC
:
2726 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
2727 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
2729 case OPC1_16_SSR_ST_B
:
2730 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
2732 case OPC1_16_SSR_ST_B_POSTINC
:
2733 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
2734 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
2736 case OPC1_16_SSR_ST_H
:
2737 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
2739 case OPC1_16_SSR_ST_H_POSTINC
:
2740 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
2741 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
2743 case OPC1_16_SSR_ST_W
:
2744 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
2746 case OPC1_16_SSR_ST_W_POSTINC
:
2747 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
2748 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
2753 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
2757 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
2760 case OPC1_16_SC_AND
:
2761 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
2763 case OPC1_16_SC_BISR
:
2764 gen_helper_1arg(bisr
, const16
& 0xff);
2766 case OPC1_16_SC_LD_A
:
2767 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2769 case OPC1_16_SC_LD_W
:
2770 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2772 case OPC1_16_SC_MOV
:
2773 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
2776 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
2778 case OPC1_16_SC_ST_A
:
2779 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2781 case OPC1_16_SC_ST_W
:
2782 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
2784 case OPC1_16_SC_SUB_A
:
2785 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
2790 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
2794 r1
= MASK_OP_SLR_D(ctx
->opcode
);
2795 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
2799 case OPC1_16_SLR_LD_A
:
2800 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
2802 case OPC1_16_SLR_LD_A_POSTINC
:
2803 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
2804 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
2806 case OPC1_16_SLR_LD_BU
:
2807 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
2809 case OPC1_16_SLR_LD_BU_POSTINC
:
2810 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
2811 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
2813 case OPC1_16_SLR_LD_H
:
2814 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2816 case OPC1_16_SLR_LD_H_POSTINC
:
2817 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2818 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
2820 case OPC1_16_SLR_LD_W
:
2821 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2823 case OPC1_16_SLR_LD_W_POSTINC
:
2824 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
2825 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
2830 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
2835 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
2836 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
2840 case OPC1_16_SRO_LD_A
:
2841 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
2843 case OPC1_16_SRO_LD_BU
:
2844 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
2846 case OPC1_16_SRO_LD_H
:
2847 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_LESW
);
2849 case OPC1_16_SRO_LD_W
:
2850 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
2852 case OPC1_16_SRO_ST_A
:
2853 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
2855 case OPC1_16_SRO_ST_B
:
2856 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
2858 case OPC1_16_SRO_ST_H
:
2859 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
2861 case OPC1_16_SRO_ST_W
:
2862 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
/*
 * Decode the 16-bit SR-format system opcodes (NOP, RET, RFE, DEBUG),
 * dispatched on the op2 field of the opcode.
 */
2867 static void decode_sr_system(CPUTriCoreState
*env
, DisasContext
*ctx
)
2870 op2
= MASK_OP_SR_OP2(ctx
->opcode
)
;
/* NOP: nothing to emit. */
2873 case OPC2_16_SR_NOP
:
/* RET is handled as a branch by gen_compute_branch. */
2875 case OPC2_16_SR_RET
:
2876 gen_compute_branch(ctx
, op2
, 0, 0, 0, 0);
/* RFE: return from exception via helper; ends the translation block. */
2878 case OPC2_16_SR_RFE
:
2879 gen_helper_rfe(cpu_env
);
2881 ctx
->bstate
= BS_BRANCH
;
2883 case OPC2_16_SR_DEBUG
:
2884 /* raise EXCP_DEBUG */
2889 static void decode_sr_accu(CPUTriCoreState
*env
, DisasContext
*ctx
)
2895 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
2896 op2
= MASK_OP_SR_OP2(ctx
->opcode
);
2899 case OPC2_16_SR_RSUB
:
2900 /* overflow only if r1 = -0x80000000 */
2901 temp
= tcg_const_i32(-0x80000000);
2903 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r1
], temp
);
2904 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
2906 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
2908 tcg_gen_neg_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
2910 tcg_gen_add_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
2911 tcg_gen_xor_tl(cpu_PSW_AV
, cpu_gpr_d
[r1
], cpu_PSW_AV
);
2913 tcg_gen_or_tl(cpu_PSW_SAV
, cpu_PSW_SAV
, cpu_PSW_AV
);
2914 tcg_temp_free(temp
);
2916 case OPC2_16_SR_SAT_B
:
2917 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7f, -0x80);
2919 case OPC2_16_SR_SAT_BU
:
2920 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xff);
2922 case OPC2_16_SR_SAT_H
:
2923 gen_saturate(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
2925 case OPC2_16_SR_SAT_HU
:
2926 gen_saturate_u(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 0xffff);
2931 static void decode_16Bit_opc(CPUTriCoreState
*env
, DisasContext
*ctx
)
2939 op1
= MASK_OP_MAJOR(ctx
->opcode
);
2941 /* handle ADDSC.A opcode only being 6 bit long */
2942 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
2943 op1
= OPC1_16_SRRS_ADDSC_A
;
2947 case OPC1_16_SRC_ADD
:
2948 case OPC1_16_SRC_ADD_A15
:
2949 case OPC1_16_SRC_ADD_15A
:
2950 case OPC1_16_SRC_ADD_A
:
2951 case OPC1_16_SRC_CADD
:
2952 case OPC1_16_SRC_CADDN
:
2953 case OPC1_16_SRC_CMOV
:
2954 case OPC1_16_SRC_CMOVN
:
2955 case OPC1_16_SRC_EQ
:
2956 case OPC1_16_SRC_LT
:
2957 case OPC1_16_SRC_MOV
:
2958 case OPC1_16_SRC_MOV_A
:
2959 case OPC1_16_SRC_SH
:
2960 case OPC1_16_SRC_SHA
:
2961 decode_src_opc(ctx
, op1
);
2964 case OPC1_16_SRR_ADD
:
2965 case OPC1_16_SRR_ADD_A15
:
2966 case OPC1_16_SRR_ADD_15A
:
2967 case OPC1_16_SRR_ADD_A
:
2968 case OPC1_16_SRR_ADDS
:
2969 case OPC1_16_SRR_AND
:
2970 case OPC1_16_SRR_CMOV
:
2971 case OPC1_16_SRR_CMOVN
:
2972 case OPC1_16_SRR_EQ
:
2973 case OPC1_16_SRR_LT
:
2974 case OPC1_16_SRR_MOV
:
2975 case OPC1_16_SRR_MOV_A
:
2976 case OPC1_16_SRR_MOV_AA
:
2977 case OPC1_16_SRR_MOV_D
:
2978 case OPC1_16_SRR_MUL
:
2979 case OPC1_16_SRR_OR
:
2980 case OPC1_16_SRR_SUB
:
2981 case OPC1_16_SRR_SUB_A15B
:
2982 case OPC1_16_SRR_SUB_15AB
:
2983 case OPC1_16_SRR_SUBS
:
2984 case OPC1_16_SRR_XOR
:
2985 decode_srr_opc(ctx
, op1
);
2988 case OPC1_16_SSR_ST_A
:
2989 case OPC1_16_SSR_ST_A_POSTINC
:
2990 case OPC1_16_SSR_ST_B
:
2991 case OPC1_16_SSR_ST_B_POSTINC
:
2992 case OPC1_16_SSR_ST_H
:
2993 case OPC1_16_SSR_ST_H_POSTINC
:
2994 case OPC1_16_SSR_ST_W
:
2995 case OPC1_16_SSR_ST_W_POSTINC
:
2996 decode_ssr_opc(ctx
, op1
);
2999 case OPC1_16_SRRS_ADDSC_A
:
3000 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
3001 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
3002 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
3003 temp
= tcg_temp_new();
3004 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
3005 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
3006 tcg_temp_free(temp
);
3009 case OPC1_16_SLRO_LD_A
:
3010 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3011 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3012 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3014 case OPC1_16_SLRO_LD_BU
:
3015 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3016 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3017 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3019 case OPC1_16_SLRO_LD_H
:
3020 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3021 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3022 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3024 case OPC1_16_SLRO_LD_W
:
3025 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3026 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3027 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3030 case OPC1_16_SB_CALL
:
3032 case OPC1_16_SB_JNZ
:
3034 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
3035 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
3038 case OPC1_16_SBC_JEQ
:
3039 case OPC1_16_SBC_JNE
:
3040 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3041 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3042 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3045 case OPC1_16_SBRN_JNZ_T
:
3046 case OPC1_16_SBRN_JZ_T
:
3047 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
3048 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
3049 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3052 case OPC1_16_SBR_JEQ
:
3053 case OPC1_16_SBR_JGEZ
:
3054 case OPC1_16_SBR_JGTZ
:
3055 case OPC1_16_SBR_JLEZ
:
3056 case OPC1_16_SBR_JLTZ
:
3057 case OPC1_16_SBR_JNE
:
3058 case OPC1_16_SBR_JNZ
:
3059 case OPC1_16_SBR_JNZ_A
:
3060 case OPC1_16_SBR_JZ
:
3061 case OPC1_16_SBR_JZ_A
:
3062 case OPC1_16_SBR_LOOP
:
3063 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3064 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3065 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3068 case OPC1_16_SC_AND
:
3069 case OPC1_16_SC_BISR
:
3070 case OPC1_16_SC_LD_A
:
3071 case OPC1_16_SC_LD_W
:
3072 case OPC1_16_SC_MOV
:
3074 case OPC1_16_SC_ST_A
:
3075 case OPC1_16_SC_ST_W
:
3076 case OPC1_16_SC_SUB_A
:
3077 decode_sc_opc(ctx
, op1
);
3080 case OPC1_16_SLR_LD_A
:
3081 case OPC1_16_SLR_LD_A_POSTINC
:
3082 case OPC1_16_SLR_LD_BU
:
3083 case OPC1_16_SLR_LD_BU_POSTINC
:
3084 case OPC1_16_SLR_LD_H
:
3085 case OPC1_16_SLR_LD_H_POSTINC
:
3086 case OPC1_16_SLR_LD_W
:
3087 case OPC1_16_SLR_LD_W_POSTINC
:
3088 decode_slr_opc(ctx
, op1
);
3091 case OPC1_16_SRO_LD_A
:
3092 case OPC1_16_SRO_LD_BU
:
3093 case OPC1_16_SRO_LD_H
:
3094 case OPC1_16_SRO_LD_W
:
3095 case OPC1_16_SRO_ST_A
:
3096 case OPC1_16_SRO_ST_B
:
3097 case OPC1_16_SRO_ST_H
:
3098 case OPC1_16_SRO_ST_W
:
3099 decode_sro_opc(ctx
, op1
);
3102 case OPC1_16_SSRO_ST_A
:
3103 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3104 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3105 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3107 case OPC1_16_SSRO_ST_B
:
3108 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3109 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3110 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3112 case OPC1_16_SSRO_ST_H
:
3113 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3114 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3115 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3117 case OPC1_16_SSRO_ST_W
:
3118 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3119 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3120 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3123 case OPCM_16_SR_SYSTEM
:
3124 decode_sr_system(env
, ctx
);
3126 case OPCM_16_SR_ACCU
:
3127 decode_sr_accu(env
, ctx
);
3130 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3131 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
3133 case OPC1_16_SR_NOT
:
3134 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3135 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3141 * 32 bit instructions
3145 static void decode_abs_ldw(CPUTriCoreState
*env
, DisasContext
*ctx
)
3152 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3153 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3154 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3156 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3159 case OPC2_32_ABS_LD_A
:
3160 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3162 case OPC2_32_ABS_LD_D
:
3163 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3165 case OPC2_32_ABS_LD_DA
:
3166 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3168 case OPC2_32_ABS_LD_W
:
3169 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3173 tcg_temp_free(temp
);
3176 static void decode_abs_ldb(CPUTriCoreState
*env
, DisasContext
*ctx
)
3183 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3184 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3185 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3187 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3190 case OPC2_32_ABS_LD_B
:
3191 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
3193 case OPC2_32_ABS_LD_BU
:
3194 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3196 case OPC2_32_ABS_LD_H
:
3197 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
3199 case OPC2_32_ABS_LD_HU
:
3200 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3204 tcg_temp_free(temp
);
/*
 * Decode the 32-bit ABS-format LDMST and SWAP.W opcodes: atomic-style
 * load-modify-store / swap at an absolute address built from off18.
 */
3207 static void decode_abs_ldst_swap(CPUTriCoreState
*env
, DisasContext
*ctx
)
3214 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3215 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3216 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
/* Materialize the absolute effective address as a TCG constant. */
3218 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3221 case OPC2_32_ABS_LDMST
:
3222 gen_ldmst(ctx
, r1
, temp
);
3224 case OPC2_32_ABS_SWAP_W
:
3225 gen_swap(ctx
, r1
, temp
);
/* Release the temporary holding the effective address. */
3229 tcg_temp_free(temp
);
/*
 * Decode the 32-bit ABS-format context load/store opcodes
 * (LDLCX/LDUCX/STLCX/STUCX): each forwards the absolute effective
 * address (built from off18) to the corresponding helper.
 */
3232 static void decode_abs_ldst_context(CPUTriCoreState
*env
, DisasContext
*ctx
)
3237 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3238 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
/* Load lower context. */
3241 case OPC2_32_ABS_LDLCX
:
3242 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
/* Load upper context. */
3244 case OPC2_32_ABS_LDUCX
:
3245 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
/* Store lower context. */
3247 case OPC2_32_ABS_STLCX
:
3248 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
/* Store upper context. */
3250 case OPC2_32_ABS_STUCX
:
3251 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
3256 static void decode_abs_store(CPUTriCoreState
*env
, DisasContext
*ctx
)
3263 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3264 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3265 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3267 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3270 case OPC2_32_ABS_ST_A
:
3271 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3273 case OPC2_32_ABS_ST_D
:
3274 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3276 case OPC2_32_ABS_ST_DA
:
3277 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3279 case OPC2_32_ABS_ST_W
:
3280 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3284 tcg_temp_free(temp
);
3287 static void decode_abs_storeb_h(CPUTriCoreState
*env
, DisasContext
*ctx
)
3294 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3295 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3296 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3298 temp
= tcg_const_i32(EA_ABS_FORMAT(address
));
3301 case OPC2_32_ABS_ST_B
:
3302 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3304 case OPC2_32_ABS_ST_H
:
3305 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3308 tcg_temp_free(temp
);
3313 static void decode_bit_andacc(CPUTriCoreState
*env
, DisasContext
*ctx
)
3319 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3320 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3321 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3322 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3323 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3324 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3328 case OPC2_32_BIT_AND_AND_T
:
3329 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3330 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
3332 case OPC2_32_BIT_AND_ANDN_T
:
3333 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3334 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
3336 case OPC2_32_BIT_AND_NOR_T
:
3337 if (TCG_TARGET_HAS_andc_i32
) {
3338 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3339 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
3341 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3342 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
3345 case OPC2_32_BIT_AND_OR_T
:
3346 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3347 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
3352 static void decode_bit_logical_t(CPUTriCoreState
*env
, DisasContext
*ctx
)
3357 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3358 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3359 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3360 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3361 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3362 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3365 case OPC2_32_BIT_AND_T
:
3366 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3367 pos1
, pos2
, &tcg_gen_and_tl
);
3369 case OPC2_32_BIT_ANDN_T
:
3370 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3371 pos1
, pos2
, &tcg_gen_andc_tl
);
3373 case OPC2_32_BIT_NOR_T
:
3374 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3375 pos1
, pos2
, &tcg_gen_nor_tl
);
3377 case OPC2_32_BIT_OR_T
:
3378 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3379 pos1
, pos2
, &tcg_gen_or_tl
);
3384 static void decode_bit_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
3390 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3391 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3392 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3393 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3394 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3395 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3397 temp
= tcg_temp_new();
3399 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
3400 if (op2
== OPC2_32_BIT_INSN_T
) {
3401 tcg_gen_not_tl(temp
, temp
);
3403 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
3404 tcg_temp_free(temp
);
3407 static void decode_bit_logical_t2(CPUTriCoreState
*env
, DisasContext
*ctx
)
3414 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3415 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3416 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3417 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3418 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3419 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3422 case OPC2_32_BIT_NAND_T
:
3423 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3424 pos1
, pos2
, &tcg_gen_nand_tl
);
3426 case OPC2_32_BIT_ORN_T
:
3427 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3428 pos1
, pos2
, &tcg_gen_orc_tl
);
3430 case OPC2_32_BIT_XNOR_T
:
3431 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3432 pos1
, pos2
, &tcg_gen_eqv_tl
);
3434 case OPC2_32_BIT_XOR_T
:
3435 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3436 pos1
, pos2
, &tcg_gen_xor_tl
);
3441 static void decode_bit_orand(CPUTriCoreState
*env
, DisasContext
*ctx
)
3448 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3449 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3450 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3451 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3452 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3453 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3456 case OPC2_32_BIT_OR_AND_T
:
3457 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3458 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_or_tl
);
3460 case OPC2_32_BIT_OR_ANDN_T
:
3461 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3462 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_or_tl
);
3464 case OPC2_32_BIT_OR_NOR_T
:
3465 if (TCG_TARGET_HAS_orc_i32
) {
3466 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3467 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_orc_tl
);
3469 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3470 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_or_tl
);
3473 case OPC2_32_BIT_OR_OR_T
:
3474 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3475 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_or_tl
);
3480 static void decode_bit_sh_logic1(CPUTriCoreState
*env
, DisasContext
*ctx
)
3487 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3488 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3489 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3490 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3491 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3492 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3494 temp
= tcg_temp_new();
3497 case OPC2_32_BIT_SH_AND_T
:
3498 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3499 pos1
, pos2
, &tcg_gen_and_tl
);
3501 case OPC2_32_BIT_SH_ANDN_T
:
3502 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3503 pos1
, pos2
, &tcg_gen_andc_tl
);
3505 case OPC2_32_BIT_SH_NOR_T
:
3506 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3507 pos1
, pos2
, &tcg_gen_nor_tl
);
3509 case OPC2_32_BIT_SH_OR_T
:
3510 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3511 pos1
, pos2
, &tcg_gen_or_tl
);
3514 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
3515 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
3516 tcg_temp_free(temp
);
3519 static void decode_bit_sh_logic2(CPUTriCoreState
*env
, DisasContext
*ctx
)
3526 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3527 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3528 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3529 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3530 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3531 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3533 temp
= tcg_temp_new();
3536 case OPC2_32_BIT_SH_NAND_T
:
3537 gen_bit_1op(temp
, cpu_gpr_d
[r1
] , cpu_gpr_d
[r2
] ,
3538 pos1
, pos2
, &tcg_gen_nand_tl
);
3540 case OPC2_32_BIT_SH_ORN_T
:
3541 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3542 pos1
, pos2
, &tcg_gen_orc_tl
);
3544 case OPC2_32_BIT_SH_XNOR_T
:
3545 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3546 pos1
, pos2
, &tcg_gen_eqv_tl
);
3548 case OPC2_32_BIT_SH_XOR_T
:
3549 gen_bit_1op(temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3550 pos1
, pos2
, &tcg_gen_xor_tl
);
3553 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], 1);
3554 tcg_gen_add_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
], temp
);
3555 tcg_temp_free(temp
);
3561 static void decode_bo_addrmode_post_pre_base(CPUTriCoreState
*env
,
3569 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3570 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3571 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3572 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3575 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
3576 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
3577 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
3578 /* instruction to access the cache */
3580 case OPC2_32_BO_CACHEA_WI_POSTINC
:
3581 case OPC2_32_BO_CACHEA_W_POSTINC
:
3582 case OPC2_32_BO_CACHEA_I_POSTINC
:
3583 /* instruction to access the cache, but we still need to handle
3584 the addressing mode */
3585 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
3587 case OPC2_32_BO_CACHEA_WI_PREINC
:
3588 case OPC2_32_BO_CACHEA_W_PREINC
:
3589 case OPC2_32_BO_CACHEA_I_PREINC
:
3590 /* instruction to access the cache, but we still need to handle
3591 the addressing mode */
3592 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
3594 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
3595 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
3596 /* TODO: Raise illegal opcode trap,
3597 if !tricore_feature(TRICORE_FEATURE_131) */
3599 case OPC2_32_BO_CACHEI_W_POSTINC
:
3600 case OPC2_32_BO_CACHEI_WI_POSTINC
:
3601 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
3602 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
3603 } /* TODO: else raise illegal opcode trap */
3605 case OPC2_32_BO_CACHEI_W_PREINC
:
3606 case OPC2_32_BO_CACHEI_WI_PREINC
:
3607 if (tricore_feature(env
, TRICORE_FEATURE_131
)) {
3608 tcg_gen_addi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
], off10
);
3609 } /* TODO: else raise illegal opcode trap */
3611 case OPC2_32_BO_ST_A_SHORTOFF
:
3612 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
3614 case OPC2_32_BO_ST_A_POSTINC
:
3615 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3617 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3619 case OPC2_32_BO_ST_A_PREINC
:
3620 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
3622 case OPC2_32_BO_ST_B_SHORTOFF
:
3623 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
3625 case OPC2_32_BO_ST_B_POSTINC
:
3626 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3628 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3630 case OPC2_32_BO_ST_B_PREINC
:
3631 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
3633 case OPC2_32_BO_ST_D_SHORTOFF
:
3634 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
3637 case OPC2_32_BO_ST_D_POSTINC
:
3638 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
3639 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3641 case OPC2_32_BO_ST_D_PREINC
:
3642 temp
= tcg_temp_new();
3643 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3644 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3645 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
3646 tcg_temp_free(temp
);
3648 case OPC2_32_BO_ST_DA_SHORTOFF
:
3649 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3652 case OPC2_32_BO_ST_DA_POSTINC
:
3653 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
3654 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3656 case OPC2_32_BO_ST_DA_PREINC
:
3657 temp
= tcg_temp_new();
3658 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3659 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3660 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
3661 tcg_temp_free(temp
);
3663 case OPC2_32_BO_ST_H_SHORTOFF
:
3664 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3666 case OPC2_32_BO_ST_H_POSTINC
:
3667 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3669 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3671 case OPC2_32_BO_ST_H_PREINC
:
3672 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3674 case OPC2_32_BO_ST_Q_SHORTOFF
:
3675 temp
= tcg_temp_new();
3676 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3677 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3678 tcg_temp_free(temp
);
3680 case OPC2_32_BO_ST_Q_POSTINC
:
3681 temp
= tcg_temp_new();
3682 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3683 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
3685 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3686 tcg_temp_free(temp
);
3688 case OPC2_32_BO_ST_Q_PREINC
:
3689 temp
= tcg_temp_new();
3690 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3691 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3692 tcg_temp_free(temp
);
3694 case OPC2_32_BO_ST_W_SHORTOFF
:
3695 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3697 case OPC2_32_BO_ST_W_POSTINC
:
3698 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3700 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3702 case OPC2_32_BO_ST_W_PREINC
:
3703 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3708 static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState
*env
,
3714 TCGv temp
, temp2
, temp3
;
3716 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3717 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3718 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3719 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3721 temp
= tcg_temp_new();
3722 temp2
= tcg_temp_new();
3723 temp3
= tcg_const_i32(off10
);
3725 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
3726 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3729 case OPC2_32_BO_CACHEA_WI_BR
:
3730 case OPC2_32_BO_CACHEA_W_BR
:
3731 case OPC2_32_BO_CACHEA_I_BR
:
3732 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3734 case OPC2_32_BO_CACHEA_WI_CIRC
:
3735 case OPC2_32_BO_CACHEA_W_CIRC
:
3736 case OPC2_32_BO_CACHEA_I_CIRC
:
3737 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3739 case OPC2_32_BO_ST_A_BR
:
3740 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3741 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3743 case OPC2_32_BO_ST_A_CIRC
:
3744 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3745 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3747 case OPC2_32_BO_ST_B_BR
:
3748 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3749 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3751 case OPC2_32_BO_ST_B_CIRC
:
3752 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3753 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3755 case OPC2_32_BO_ST_D_BR
:
3756 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
3757 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3759 case OPC2_32_BO_ST_D_CIRC
:
3760 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3761 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3762 tcg_gen_addi_tl(temp
, temp
, 4);
3763 tcg_gen_rem_tl(temp
, temp
, temp2
);
3764 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3765 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3766 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3768 case OPC2_32_BO_ST_DA_BR
:
3769 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
3770 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3772 case OPC2_32_BO_ST_DA_CIRC
:
3773 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3774 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3775 tcg_gen_addi_tl(temp
, temp
, 4);
3776 tcg_gen_rem_tl(temp
, temp
, temp2
);
3777 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3778 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3779 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3781 case OPC2_32_BO_ST_H_BR
:
3782 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3783 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3785 case OPC2_32_BO_ST_H_CIRC
:
3786 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
3787 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3789 case OPC2_32_BO_ST_Q_BR
:
3790 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3791 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
3792 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3794 case OPC2_32_BO_ST_Q_CIRC
:
3795 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
3796 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
3797 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3799 case OPC2_32_BO_ST_W_BR
:
3800 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3801 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3803 case OPC2_32_BO_ST_W_CIRC
:
3804 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3805 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3808 tcg_temp_free(temp
);
3809 tcg_temp_free(temp2
);
3810 tcg_temp_free(temp3
);
3813 static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState
*env
,
3821 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3822 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3823 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3824 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3827 case OPC2_32_BO_LD_A_SHORTOFF
:
3828 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3830 case OPC2_32_BO_LD_A_POSTINC
:
3831 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3833 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3835 case OPC2_32_BO_LD_A_PREINC
:
3836 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3838 case OPC2_32_BO_LD_B_SHORTOFF
:
3839 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
3841 case OPC2_32_BO_LD_B_POSTINC
:
3842 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3844 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3846 case OPC2_32_BO_LD_B_PREINC
:
3847 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
3849 case OPC2_32_BO_LD_BU_SHORTOFF
:
3850 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
3852 case OPC2_32_BO_LD_BU_POSTINC
:
3853 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3855 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3857 case OPC2_32_BO_LD_BU_PREINC
:
3858 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
3860 case OPC2_32_BO_LD_D_SHORTOFF
:
3861 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
3864 case OPC2_32_BO_LD_D_POSTINC
:
3865 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
3866 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3868 case OPC2_32_BO_LD_D_PREINC
:
3869 temp
= tcg_temp_new();
3870 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3871 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3872 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
3873 tcg_temp_free(temp
);
3875 case OPC2_32_BO_LD_DA_SHORTOFF
:
3876 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3879 case OPC2_32_BO_LD_DA_POSTINC
:
3880 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
3881 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3883 case OPC2_32_BO_LD_DA_PREINC
:
3884 temp
= tcg_temp_new();
3885 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
3886 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3887 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
3888 tcg_temp_free(temp
);
3890 case OPC2_32_BO_LD_H_SHORTOFF
:
3891 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
3893 case OPC2_32_BO_LD_H_POSTINC
:
3894 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3896 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3898 case OPC2_32_BO_LD_H_PREINC
:
3899 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
3901 case OPC2_32_BO_LD_HU_SHORTOFF
:
3902 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3904 case OPC2_32_BO_LD_HU_POSTINC
:
3905 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3907 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3909 case OPC2_32_BO_LD_HU_PREINC
:
3910 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3912 case OPC2_32_BO_LD_Q_SHORTOFF
:
3913 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3914 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3916 case OPC2_32_BO_LD_Q_POSTINC
:
3917 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3919 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3920 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3922 case OPC2_32_BO_LD_Q_PREINC
:
3923 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
3924 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
3926 case OPC2_32_BO_LD_W_SHORTOFF
:
3927 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3929 case OPC2_32_BO_LD_W_POSTINC
:
3930 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
3932 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
3934 case OPC2_32_BO_LD_W_PREINC
:
3935 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
3940 static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState
*env
,
3947 TCGv temp
, temp2
, temp3
;
3949 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
3950 r2
= MASK_OP_BO_S2(ctx
->opcode
);
3951 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
3952 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
3954 temp
= tcg_temp_new();
3955 temp2
= tcg_temp_new();
3956 temp3
= tcg_const_i32(off10
);
3958 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
3959 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3963 case OPC2_32_BO_LD_A_BR
:
3964 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3965 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3967 case OPC2_32_BO_LD_A_CIRC
:
3968 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3969 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3971 case OPC2_32_BO_LD_B_BR
:
3972 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
3973 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3975 case OPC2_32_BO_LD_B_CIRC
:
3976 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
3977 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3979 case OPC2_32_BO_LD_BU_BR
:
3980 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3981 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3983 case OPC2_32_BO_LD_BU_CIRC
:
3984 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
3985 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
3987 case OPC2_32_BO_LD_D_BR
:
3988 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
3989 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
3991 case OPC2_32_BO_LD_D_CIRC
:
3992 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
3993 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
3994 tcg_gen_addi_tl(temp
, temp
, 4);
3995 tcg_gen_rem_tl(temp
, temp
, temp2
);
3996 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
3997 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
3998 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4000 case OPC2_32_BO_LD_DA_BR
:
4001 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4002 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4004 case OPC2_32_BO_LD_DA_CIRC
:
4005 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4006 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4007 tcg_gen_addi_tl(temp
, temp
, 4);
4008 tcg_gen_rem_tl(temp
, temp
, temp2
);
4009 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4010 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4011 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4013 case OPC2_32_BO_LD_H_BR
:
4014 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4015 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4017 case OPC2_32_BO_LD_H_CIRC
:
4018 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4019 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4021 case OPC2_32_BO_LD_HU_BR
:
4022 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4023 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4025 case OPC2_32_BO_LD_HU_CIRC
:
4026 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4027 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4029 case OPC2_32_BO_LD_Q_BR
:
4030 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4031 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4032 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4034 case OPC2_32_BO_LD_Q_CIRC
:
4035 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4036 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4037 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4039 case OPC2_32_BO_LD_W_BR
:
4040 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4041 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4043 case OPC2_32_BO_LD_W_CIRC
:
4044 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4045 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4048 tcg_temp_free(temp
);
4049 tcg_temp_free(temp2
);
4050 tcg_temp_free(temp3
);
4053 static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState
*env
,
4062 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4063 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4064 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4065 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4068 temp
= tcg_temp_new();
4069 temp2
= tcg_temp_new();
4072 case OPC2_32_BO_LDLCX_SHORTOFF
:
4073 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4074 gen_helper_ldlcx(cpu_env
, temp
);
4076 case OPC2_32_BO_LDMST_SHORTOFF
:
4077 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4078 gen_ldmst(ctx
, r1
, temp
);
4080 case OPC2_32_BO_LDMST_POSTINC
:
4081 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4082 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4084 case OPC2_32_BO_LDMST_PREINC
:
4085 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4086 gen_ldmst(ctx
, r1
, cpu_gpr_a
[r2
]);
4088 case OPC2_32_BO_LDUCX_SHORTOFF
:
4089 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4090 gen_helper_lducx(cpu_env
, temp
);
4092 case OPC2_32_BO_LEA_SHORTOFF
:
4093 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
);
4095 case OPC2_32_BO_STLCX_SHORTOFF
:
4096 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4097 gen_helper_stlcx(cpu_env
, temp
);
4099 case OPC2_32_BO_STUCX_SHORTOFF
:
4100 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4101 gen_helper_stucx(cpu_env
, temp
);
4103 case OPC2_32_BO_SWAP_W_SHORTOFF
:
4104 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4105 gen_swap(ctx
, r1
, temp
);
4107 case OPC2_32_BO_SWAP_W_POSTINC
:
4108 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4109 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4111 case OPC2_32_BO_SWAP_W_PREINC
:
4112 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4113 gen_swap(ctx
, r1
, cpu_gpr_a
[r2
]);
4116 tcg_temp_free(temp
);
4117 tcg_temp_free(temp2
);
4120 static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState
*env
,
4127 TCGv temp
, temp2
, temp3
;
4129 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4130 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4131 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4132 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4134 temp
= tcg_temp_new();
4135 temp2
= tcg_temp_new();
4136 temp3
= tcg_const_i32(off10
);
4138 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4139 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4142 case OPC2_32_BO_LDMST_BR
:
4143 gen_ldmst(ctx
, r1
, temp2
);
4144 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4146 case OPC2_32_BO_LDMST_CIRC
:
4147 gen_ldmst(ctx
, r1
, temp2
);
4148 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4150 case OPC2_32_BO_SWAP_W_BR
:
4151 gen_swap(ctx
, r1
, temp2
);
4152 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4154 case OPC2_32_BO_SWAP_W_CIRC
:
4155 gen_swap(ctx
, r1
, temp2
);
4156 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], temp3
);
4159 tcg_temp_free(temp
);
4160 tcg_temp_free(temp2
);
4161 tcg_temp_free(temp3
);
4164 static void decode_bol_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int32_t op1
)
4170 r1
= MASK_OP_BOL_S1D(ctx
->opcode
);
4171 r2
= MASK_OP_BOL_S2(ctx
->opcode
);
4172 address
= MASK_OP_BOL_OFF16_SEXT(ctx
->opcode
);
4175 case OPC1_32_BOL_LD_A_LONGOFF
:
4176 temp
= tcg_temp_new();
4177 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4178 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4179 tcg_temp_free(temp
);
4181 case OPC1_32_BOL_LD_W_LONGOFF
:
4182 temp
= tcg_temp_new();
4183 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], address
);
4184 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUL
);
4185 tcg_temp_free(temp
);
4187 case OPC1_32_BOL_LEA_LONGOFF
:
4188 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
);
4190 case OPC1_32_BOL_ST_A_LONGOFF
:
4191 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4192 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4194 /* raise illegal opcode trap */
4197 case OPC1_32_BOL_ST_W_LONGOFF
:
4198 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUL
);
4200 case OPC1_32_BOL_LD_B_LONGOFF
:
4201 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4202 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4204 /* raise illegal opcode trap */
4207 case OPC1_32_BOL_LD_BU_LONGOFF
:
4208 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4209 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_UB
);
4211 /* raise illegal opcode trap */
4214 case OPC1_32_BOL_LD_H_LONGOFF
:
4215 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4216 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4218 /* raise illegal opcode trap */
4221 case OPC1_32_BOL_LD_HU_LONGOFF
:
4222 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4223 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LEUW
);
4225 /* raise illegal opcode trap */
4228 case OPC1_32_BOL_ST_B_LONGOFF
:
4229 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4230 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_SB
);
4232 /* raise illegal opcode trap */
4235 case OPC1_32_BOL_ST_H_LONGOFF
:
4236 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4237 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], address
, MO_LESW
);
4239 /* raise illegal opcode trap */
4246 static void decode_rc_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
4253 r2
= MASK_OP_RC_D(ctx
->opcode
);
4254 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4255 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4256 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4258 temp
= tcg_temp_new();
4261 case OPC2_32_RC_AND
:
4262 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4264 case OPC2_32_RC_ANDN
:
4265 tcg_gen_andi_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4267 case OPC2_32_RC_NAND
:
4268 tcg_gen_movi_tl(temp
, const9
);
4269 tcg_gen_nand_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4271 case OPC2_32_RC_NOR
:
4272 tcg_gen_movi_tl(temp
, const9
);
4273 tcg_gen_nor_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
);
4276 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4278 case OPC2_32_RC_ORN
:
4279 tcg_gen_ori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], ~const9
);
4282 const9
= sextract32(const9
, 0, 6);
4283 gen_shi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4285 case OPC2_32_RC_SH_H
:
4286 const9
= sextract32(const9
, 0, 5);
4287 gen_sh_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4289 case OPC2_32_RC_SHA
:
4290 const9
= sextract32(const9
, 0, 6);
4291 gen_shaci(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4293 case OPC2_32_RC_SHA_H
:
4294 const9
= sextract32(const9
, 0, 5);
4295 gen_sha_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4297 case OPC2_32_RC_SHAS
:
4298 gen_shasi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4300 case OPC2_32_RC_XNOR
:
4301 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4302 tcg_gen_not_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
]);
4304 case OPC2_32_RC_XOR
:
4305 tcg_gen_xori_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4308 tcg_temp_free(temp
);
4311 static void decode_rc_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
4319 r2
= MASK_OP_RC_D(ctx
->opcode
);
4320 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4321 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
4323 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4325 temp
= tcg_temp_new();
4328 case OPC2_32_RC_ABSDIF
:
4329 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4331 case OPC2_32_RC_ABSDIFS
:
4332 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4334 case OPC2_32_RC_ADD
:
4335 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4337 case OPC2_32_RC_ADDC
:
4338 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4340 case OPC2_32_RC_ADDS
:
4341 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4343 case OPC2_32_RC_ADDS_U
:
4344 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4346 case OPC2_32_RC_ADDX
:
4347 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4349 case OPC2_32_RC_AND_EQ
:
4350 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4351 const9
, &tcg_gen_and_tl
);
4353 case OPC2_32_RC_AND_GE
:
4354 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4355 const9
, &tcg_gen_and_tl
);
4357 case OPC2_32_RC_AND_GE_U
:
4358 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4359 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4360 const9
, &tcg_gen_and_tl
);
4362 case OPC2_32_RC_AND_LT
:
4363 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4364 const9
, &tcg_gen_and_tl
);
4366 case OPC2_32_RC_AND_LT_U
:
4367 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4368 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4369 const9
, &tcg_gen_and_tl
);
4371 case OPC2_32_RC_AND_NE
:
4372 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4373 const9
, &tcg_gen_and_tl
);
4376 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4378 case OPC2_32_RC_EQANY_B
:
4379 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4381 case OPC2_32_RC_EQANY_H
:
4382 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4385 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4387 case OPC2_32_RC_GE_U
:
4388 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4389 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4392 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4394 case OPC2_32_RC_LT_U
:
4395 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4396 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4398 case OPC2_32_RC_MAX
:
4399 tcg_gen_movi_tl(temp
, const9
);
4400 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4401 cpu_gpr_d
[r1
], temp
);
4403 case OPC2_32_RC_MAX_U
:
4404 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
4405 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4406 cpu_gpr_d
[r1
], temp
);
4408 case OPC2_32_RC_MIN
:
4409 tcg_gen_movi_tl(temp
, const9
);
4410 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4411 cpu_gpr_d
[r1
], temp
);
4413 case OPC2_32_RC_MIN_U
:
4414 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
4415 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
4416 cpu_gpr_d
[r1
], temp
);
4419 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4421 case OPC2_32_RC_OR_EQ
:
4422 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4423 const9
, &tcg_gen_or_tl
);
4425 case OPC2_32_RC_OR_GE
:
4426 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4427 const9
, &tcg_gen_or_tl
);
4429 case OPC2_32_RC_OR_GE_U
:
4430 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4431 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4432 const9
, &tcg_gen_or_tl
);
4434 case OPC2_32_RC_OR_LT
:
4435 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4436 const9
, &tcg_gen_or_tl
);
4438 case OPC2_32_RC_OR_LT_U
:
4439 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4440 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4441 const9
, &tcg_gen_or_tl
);
4443 case OPC2_32_RC_OR_NE
:
4444 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4445 const9
, &tcg_gen_or_tl
);
4447 case OPC2_32_RC_RSUB
:
4448 tcg_gen_movi_tl(temp
, const9
);
4449 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
4451 case OPC2_32_RC_RSUBS
:
4452 tcg_gen_movi_tl(temp
, const9
);
4453 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
4455 case OPC2_32_RC_RSUBS_U
:
4456 tcg_gen_movi_tl(temp
, const9
);
4457 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
4459 case OPC2_32_RC_SH_EQ
:
4460 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4462 case OPC2_32_RC_SH_GE
:
4463 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4465 case OPC2_32_RC_SH_GE_U
:
4466 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4467 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4469 case OPC2_32_RC_SH_LT
:
4470 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4472 case OPC2_32_RC_SH_LT_U
:
4473 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4474 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4476 case OPC2_32_RC_SH_NE
:
4477 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4479 case OPC2_32_RC_XOR_EQ
:
4480 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4481 const9
, &tcg_gen_xor_tl
);
4483 case OPC2_32_RC_XOR_GE
:
4484 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4485 const9
, &tcg_gen_xor_tl
);
4487 case OPC2_32_RC_XOR_GE_U
:
4488 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4489 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4490 const9
, &tcg_gen_xor_tl
);
4492 case OPC2_32_RC_XOR_LT
:
4493 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4494 const9
, &tcg_gen_xor_tl
);
4496 case OPC2_32_RC_XOR_LT_U
:
4497 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4498 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4499 const9
, &tcg_gen_xor_tl
);
4501 case OPC2_32_RC_XOR_NE
:
4502 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
4503 const9
, &tcg_gen_xor_tl
);
4506 tcg_temp_free(temp
);
4509 static void decode_rc_serviceroutine(CPUTriCoreState
*env
, DisasContext
*ctx
)
4514 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4515 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4518 case OPC2_32_RC_BISR
:
4519 gen_helper_1arg(bisr
, const9
);
4521 case OPC2_32_RC_SYSCALL
:
4522 /* TODO: Add exception generation */
4527 static void decode_rc_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
4533 r2
= MASK_OP_RC_D(ctx
->opcode
);
4534 r1
= MASK_OP_RC_S1(ctx
->opcode
);
4535 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
4537 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
4540 case OPC2_32_RC_MUL_32
:
4541 gen_muli_i32s(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4543 case OPC2_32_RC_MUL_64
:
4544 gen_muli_i64s(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
4546 case OPC2_32_RC_MULS_32
:
4547 gen_mulsi_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4549 case OPC2_32_RC_MUL_U_64
:
4550 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4551 gen_muli_i64u(cpu_gpr_d
[r2
], cpu_gpr_d
[r2
+1], cpu_gpr_d
[r1
], const9
);
4553 case OPC2_32_RC_MULS_U_32
:
4554 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
4555 gen_mulsui_i32(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
4561 static void decode_rcpw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
4565 int32_t pos
, width
, const4
;
4569 op2
= MASK_OP_RCPW_OP2(ctx
->opcode
);
4570 r1
= MASK_OP_RCPW_S1(ctx
->opcode
);
4571 r2
= MASK_OP_RCPW_D(ctx
->opcode
);
4572 const4
= MASK_OP_RCPW_CONST4(ctx
->opcode
);
4573 width
= MASK_OP_RCPW_WIDTH(ctx
->opcode
);
4574 pos
= MASK_OP_RCPW_POS(ctx
->opcode
);
4577 case OPC2_32_RCPW_IMASK
:
4578 /* if pos + width > 31 undefined result */
4579 if (pos
+ width
<= 31) {
4580 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], ((1u << width
) - 1) << pos
);
4581 tcg_gen_movi_tl(cpu_gpr_d
[r2
], (const4
<< pos
));
4584 case OPC2_32_RCPW_INSERT
:
4585 /* if pos + width > 32 undefined result */
4586 if (pos
+ width
<= 32) {
4587 temp
= tcg_const_i32(const4
);
4588 tcg_gen_deposit_tl(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
, pos
, width
);
4589 tcg_temp_free(temp
);
4597 static void decode_rcrw_insert(CPUTriCoreState
*env
, DisasContext
*ctx
)
4601 int32_t width
, const4
;
4603 TCGv temp
, temp2
, temp3
;
4605 op2
= MASK_OP_RCRW_OP2(ctx
->opcode
);
4606 r1
= MASK_OP_RCRW_S1(ctx
->opcode
);
4607 r3
= MASK_OP_RCRW_S3(ctx
->opcode
);
4608 r4
= MASK_OP_RCRW_D(ctx
->opcode
);
4609 width
= MASK_OP_RCRW_WIDTH(ctx
->opcode
);
4610 const4
= MASK_OP_RCRW_CONST4(ctx
->opcode
);
4612 temp
= tcg_temp_new();
4613 temp2
= tcg_temp_new();
4616 case OPC2_32_RCRW_IMASK
:
4617 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r4
], 0x1f);
4618 tcg_gen_movi_tl(temp2
, (1 << width
) - 1);
4619 tcg_gen_shl_tl(cpu_gpr_d
[r3
+ 1], temp2
, temp
);
4620 tcg_gen_movi_tl(temp2
, const4
);
4621 tcg_gen_shl_tl(cpu_gpr_d
[r3
], temp2
, temp
);
4623 case OPC2_32_RCRW_INSERT
:
4624 temp3
= tcg_temp_new();
4626 tcg_gen_movi_tl(temp
, width
);
4627 tcg_gen_movi_tl(temp2
, const4
);
4628 tcg_gen_andi_tl(temp3
, cpu_gpr_d
[r4
], 0x1f);
4629 gen_insert(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp2
, temp
, temp3
);
4631 tcg_temp_free(temp3
);
4634 tcg_temp_free(temp
);
4635 tcg_temp_free(temp2
);
4640 static void decode_rcr_cond_select(CPUTriCoreState
*env
, DisasContext
*ctx
)
4648 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
4649 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
4650 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
4651 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
4652 r4
= MASK_OP_RCR_D(ctx
->opcode
);
4655 case OPC2_32_RCR_CADD
:
4656 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
4659 case OPC2_32_RCR_CADDN
:
4660 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const9
, cpu_gpr_d
[r3
],
4663 case OPC2_32_RCR_SEL
:
4664 temp
= tcg_const_i32(0);
4665 temp2
= tcg_const_i32(const9
);
4666 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
4667 cpu_gpr_d
[r1
], temp2
);
4668 tcg_temp_free(temp
);
4669 tcg_temp_free(temp2
);
4671 case OPC2_32_RCR_SELN
:
4672 temp
= tcg_const_i32(0);
4673 temp2
= tcg_const_i32(const9
);
4674 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r4
], cpu_gpr_d
[r3
], temp
,
4675 cpu_gpr_d
[r1
], temp2
);
4676 tcg_temp_free(temp
);
4677 tcg_temp_free(temp2
);
4682 static void decode_rcr_madd(CPUTriCoreState
*env
, DisasContext
*ctx
)
4689 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
4690 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
4691 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
4692 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
4693 r4
= MASK_OP_RCR_D(ctx
->opcode
);
4696 case OPC2_32_RCR_MADD_32
:
4697 gen_maddi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4699 case OPC2_32_RCR_MADD_64
:
4700 gen_maddi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4701 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4703 case OPC2_32_RCR_MADDS_32
:
4704 gen_maddsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4706 case OPC2_32_RCR_MADDS_64
:
4707 gen_maddsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4708 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4710 case OPC2_32_RCR_MADD_U_64
:
4711 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4712 gen_maddui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4713 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4715 case OPC2_32_RCR_MADDS_U_32
:
4716 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4717 gen_maddsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4719 case OPC2_32_RCR_MADDS_U_64
:
4720 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4721 gen_maddsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4722 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4727 static void decode_rcr_msub(CPUTriCoreState
*env
, DisasContext
*ctx
)
4734 op2
= MASK_OP_RCR_OP2(ctx
->opcode
);
4735 r1
= MASK_OP_RCR_S1(ctx
->opcode
);
4736 const9
= MASK_OP_RCR_CONST9_SEXT(ctx
->opcode
);
4737 r3
= MASK_OP_RCR_S3(ctx
->opcode
);
4738 r4
= MASK_OP_RCR_D(ctx
->opcode
);
4741 case OPC2_32_RCR_MSUB_32
:
4742 gen_msubi32_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4744 case OPC2_32_RCR_MSUB_64
:
4745 gen_msubi64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4746 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4748 case OPC2_32_RCR_MSUBS_32
:
4749 gen_msubsi_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4751 case OPC2_32_RCR_MSUBS_64
:
4752 gen_msubsi_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4753 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4755 case OPC2_32_RCR_MSUB_U_64
:
4756 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4757 gen_msubui64_d(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4758 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4760 case OPC2_32_RCR_MSUBS_U_32
:
4761 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4762 gen_msubsui_32(cpu_gpr_d
[r4
], cpu_gpr_d
[r1
], cpu_gpr_d
[r3
], const9
);
4764 case OPC2_32_RCR_MSUBS_U_64
:
4765 const9
= MASK_OP_RCR_CONST9(ctx
->opcode
);
4766 gen_msubsui_64(cpu_gpr_d
[r4
], cpu_gpr_d
[r4
+1], cpu_gpr_d
[r1
],
4767 cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], const9
);
4774 static void decode_rlc_opc(CPUTriCoreState
*env
, DisasContext
*ctx
,
4780 const16
= MASK_OP_RLC_CONST16_SEXT(ctx
->opcode
);
4781 r1
= MASK_OP_RLC_S1(ctx
->opcode
);
4782 r2
= MASK_OP_RLC_D(ctx
->opcode
);
4785 case OPC1_32_RLC_ADDI
:
4786 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
);
4788 case OPC1_32_RLC_ADDIH
:
4789 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const16
<< 16);
4791 case OPC1_32_RLC_ADDIH_A
:
4792 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r1
], const16
<< 16);
4794 case OPC1_32_RLC_MFCR
:
4795 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
4796 gen_mfcr(env
, cpu_gpr_d
[r2
], const16
);
4798 case OPC1_32_RLC_MOV
:
4799 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
4801 case OPC1_32_RLC_MOV_64
:
4802 if (tricore_feature(env
, TRICORE_FEATURE_16
)) {
4803 if ((r2
& 0x1) != 0) {
4804 /* TODO: raise OPD trap */
4806 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
4807 tcg_gen_movi_tl(cpu_gpr_d
[r2
+1], const16
>> 15);
4809 /* TODO: raise illegal opcode trap */
4812 case OPC1_32_RLC_MOV_U
:
4813 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
4814 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
);
4816 case OPC1_32_RLC_MOV_H
:
4817 tcg_gen_movi_tl(cpu_gpr_d
[r2
], const16
<< 16);
4819 case OPC1_32_RLC_MOVH_A
:
4820 tcg_gen_movi_tl(cpu_gpr_a
[r2
], const16
<< 16);
4822 case OPC1_32_RLC_MTCR
:
4823 const16
= MASK_OP_RLC_CONST16(ctx
->opcode
);
4824 gen_mtcr(env
, ctx
, cpu_gpr_d
[r1
], const16
);
4830 static void decode_rr_accumulator(CPUTriCoreState
*env
, DisasContext
*ctx
)
4835 r3
= MASK_OP_RR_D(ctx
->opcode
);
4836 r2
= MASK_OP_RR_S2(ctx
->opcode
);
4837 r1
= MASK_OP_RR_S1(ctx
->opcode
);
4838 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
4841 case OPC2_32_RR_ABS
:
4842 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
4844 case OPC2_32_RR_ABS_B
:
4845 gen_helper_abs_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4847 case OPC2_32_RR_ABS_H
:
4848 gen_helper_abs_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4850 case OPC2_32_RR_ABSDIF
:
4851 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4853 case OPC2_32_RR_ABSDIF_B
:
4854 gen_helper_absdif_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4857 case OPC2_32_RR_ABSDIF_H
:
4858 gen_helper_absdif_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4861 case OPC2_32_RR_ABSDIFS
:
4862 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4865 case OPC2_32_RR_ABSDIFS_H
:
4866 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4869 case OPC2_32_RR_ABSS
:
4870 gen_helper_abs_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4872 case OPC2_32_RR_ABSS_H
:
4873 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r2
]);
4875 case OPC2_32_RR_ADD
:
4876 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4878 case OPC2_32_RR_ADD_B
:
4879 gen_helper_add_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4881 case OPC2_32_RR_ADD_H
:
4882 gen_helper_add_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4884 case OPC2_32_RR_ADDC
:
4885 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4887 case OPC2_32_RR_ADDS
:
4888 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4890 case OPC2_32_RR_ADDS_H
:
4891 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4894 case OPC2_32_RR_ADDS_HU
:
4895 gen_helper_add_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4898 case OPC2_32_RR_ADDS_U
:
4899 gen_helper_add_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
4902 case OPC2_32_RR_ADDX
:
4903 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4905 case OPC2_32_RR_AND_EQ
:
4906 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4907 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4909 case OPC2_32_RR_AND_GE
:
4910 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4911 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4913 case OPC2_32_RR_AND_GE_U
:
4914 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4915 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4917 case OPC2_32_RR_AND_LT
:
4918 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4919 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4921 case OPC2_32_RR_AND_LT_U
:
4922 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4923 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4925 case OPC2_32_RR_AND_NE
:
4926 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4927 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
4930 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4933 case OPC2_32_RR_EQ_B
:
4934 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4936 case OPC2_32_RR_EQ_H
:
4937 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4939 case OPC2_32_RR_EQ_W
:
4940 gen_cond_w(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4942 case OPC2_32_RR_EQANY_B
:
4943 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4945 case OPC2_32_RR_EQANY_H
:
4946 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4949 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4952 case OPC2_32_RR_GE_U
:
4953 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4957 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4960 case OPC2_32_RR_LT_U
:
4961 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4964 case OPC2_32_RR_LT_B
:
4965 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4967 case OPC2_32_RR_LT_BU
:
4968 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4970 case OPC2_32_RR_LT_H
:
4971 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4973 case OPC2_32_RR_LT_HU
:
4974 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4976 case OPC2_32_RR_LT_W
:
4977 gen_cond_w(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4979 case OPC2_32_RR_LT_WU
:
4980 gen_cond_w(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4982 case OPC2_32_RR_MAX
:
4983 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4984 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4986 case OPC2_32_RR_MAX_U
:
4987 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
4988 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4990 case OPC2_32_RR_MAX_B
:
4991 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4993 case OPC2_32_RR_MAX_BU
:
4994 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4996 case OPC2_32_RR_MAX_H
:
4997 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
4999 case OPC2_32_RR_MAX_HU
:
5000 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5002 case OPC2_32_RR_MIN
:
5003 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5004 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5006 case OPC2_32_RR_MIN_U
:
5007 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5008 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5010 case OPC2_32_RR_MIN_B
:
5011 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5013 case OPC2_32_RR_MIN_BU
:
5014 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5016 case OPC2_32_RR_MIN_H
:
5017 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5019 case OPC2_32_RR_MIN_HU
:
5020 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5022 case OPC2_32_RR_MOV
:
5023 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5026 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5029 case OPC2_32_RR_OR_EQ
:
5030 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5031 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5033 case OPC2_32_RR_OR_GE
:
5034 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5035 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5037 case OPC2_32_RR_OR_GE_U
:
5038 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5039 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5041 case OPC2_32_RR_OR_LT
:
5042 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5043 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5045 case OPC2_32_RR_OR_LT_U
:
5046 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5047 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5049 case OPC2_32_RR_OR_NE
:
5050 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5051 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5053 case OPC2_32_RR_SAT_B
:
5054 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
5056 case OPC2_32_RR_SAT_BU
:
5057 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
5059 case OPC2_32_RR_SAT_H
:
5060 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
5062 case OPC2_32_RR_SAT_HU
:
5063 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
5065 case OPC2_32_RR_SH_EQ
:
5066 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5069 case OPC2_32_RR_SH_GE
:
5070 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5073 case OPC2_32_RR_SH_GE_U
:
5074 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5077 case OPC2_32_RR_SH_LT
:
5078 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5081 case OPC2_32_RR_SH_LT_U
:
5082 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5085 case OPC2_32_RR_SH_NE
:
5086 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5089 case OPC2_32_RR_SUB
:
5090 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5092 case OPC2_32_RR_SUB_B
:
5093 gen_helper_sub_b(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5095 case OPC2_32_RR_SUB_H
:
5096 gen_helper_sub_h(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5098 case OPC2_32_RR_SUBC
:
5099 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5101 case OPC2_32_RR_SUBS
:
5102 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5104 case OPC2_32_RR_SUBS_U
:
5105 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5107 case OPC2_32_RR_SUBS_H
:
5108 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5111 case OPC2_32_RR_SUBS_HU
:
5112 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5115 case OPC2_32_RR_SUBX
:
5116 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5118 case OPC2_32_RR_XOR_EQ
:
5119 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5120 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5122 case OPC2_32_RR_XOR_GE
:
5123 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5124 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5126 case OPC2_32_RR_XOR_GE_U
:
5127 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5128 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5130 case OPC2_32_RR_XOR_LT
:
5131 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5132 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5134 case OPC2_32_RR_XOR_LT_U
:
5135 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5136 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5138 case OPC2_32_RR_XOR_NE
:
5139 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5140 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5145 static void decode_rr_logical_shift(CPUTriCoreState
*env
, DisasContext
*ctx
)
5151 r3
= MASK_OP_RR_D(ctx
->opcode
);
5152 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5153 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5155 temp
= tcg_temp_new();
5156 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5159 case OPC2_32_RR_AND
:
5160 tcg_gen_and_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5162 case OPC2_32_RR_ANDN
:
5163 tcg_gen_andc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5165 case OPC2_32_RR_CLO
:
5166 gen_helper_clo(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5168 case OPC2_32_RR_CLO_H
:
5169 gen_helper_clo_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5171 case OPC2_32_RR_CLS
:
5172 gen_helper_cls(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5174 case OPC2_32_RR_CLS_H
:
5175 gen_helper_cls_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5177 case OPC2_32_RR_CLZ
:
5178 gen_helper_clz(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5180 case OPC2_32_RR_CLZ_H
:
5181 gen_helper_clz_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5183 case OPC2_32_RR_NAND
:
5184 tcg_gen_nand_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5186 case OPC2_32_RR_NOR
:
5187 tcg_gen_nor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5190 tcg_gen_or_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5192 case OPC2_32_RR_ORN
:
5193 tcg_gen_orc_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5196 gen_helper_sh(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5198 case OPC2_32_RR_SH_H
:
5199 gen_helper_sh_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5201 case OPC2_32_RR_SHA
:
5202 gen_helper_sha(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5204 case OPC2_32_RR_SHA_H
:
5205 gen_helper_sha_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5207 case OPC2_32_RR_SHAS
:
5208 gen_shas(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5210 case OPC2_32_RR_XNOR
:
5211 tcg_gen_eqv_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5213 case OPC2_32_RR_XOR
:
5214 tcg_gen_xor_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5217 tcg_temp_free(temp
);
5220 static void decode_rr_address(CPUTriCoreState
*env
, DisasContext
*ctx
)
5226 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5227 r3
= MASK_OP_RR_D(ctx
->opcode
);
5228 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5229 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5230 n
= MASK_OP_RR_N(ctx
->opcode
);
5233 case OPC2_32_RR_ADD_A
:
5234 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
5236 case OPC2_32_RR_ADDSC_A
:
5237 temp
= tcg_temp_new();
5238 tcg_gen_shli_tl(temp
, cpu_gpr_d
[r1
], n
);
5239 tcg_gen_add_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
], temp
);
5240 tcg_temp_free(temp
);
5242 case OPC2_32_RR_ADDSC_AT
:
5243 temp
= tcg_temp_new();
5244 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 3);
5245 tcg_gen_add_tl(temp
, cpu_gpr_a
[r2
], temp
);
5246 tcg_gen_andi_tl(cpu_gpr_a
[r3
], temp
, 0xFFFFFFFC);
5247 tcg_temp_free(temp
);
5249 case OPC2_32_RR_EQ_A
:
5250 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
5253 case OPC2_32_RR_EQZ
:
5254 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
5256 case OPC2_32_RR_GE_A
:
5257 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
5260 case OPC2_32_RR_LT_A
:
5261 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
5264 case OPC2_32_RR_MOV_A
:
5265 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_d
[r2
]);
5267 case OPC2_32_RR_MOV_AA
:
5268 tcg_gen_mov_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r2
]);
5270 case OPC2_32_RR_MOV_D
:
5271 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_a
[r2
]);
5273 case OPC2_32_RR_NE_A
:
5274 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
],
5277 case OPC2_32_RR_NEZ_A
:
5278 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_a
[r1
], 0);
5280 case OPC2_32_RR_SUB_A
:
5281 tcg_gen_sub_tl(cpu_gpr_a
[r3
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
5286 static void decode_rr_idirect(CPUTriCoreState
*env
, DisasContext
*ctx
)
5291 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5292 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5296 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
5298 case OPC2_32_RR_JLI
:
5299 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->next_pc
);
5300 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
5302 case OPC2_32_RR_CALLI
:
5303 gen_helper_1arg(call
, ctx
->next_pc
);
5304 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], ~0x1);
5308 ctx
->bstate
= BS_BRANCH
;
5311 static void decode_rr_divide(CPUTriCoreState
*env
, DisasContext
*ctx
)
5318 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5319 r3
= MASK_OP_RR_D(ctx
->opcode
);
5320 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5321 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5324 case OPC2_32_RR_BMERGE
:
5325 gen_helper_bmerge(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5327 case OPC2_32_RR_BSPLIT
:
5328 gen_bsplit(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
5330 case OPC2_32_RR_DVINIT_B
:
5331 gen_dvinit_b(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
5334 case OPC2_32_RR_DVINIT_BU
:
5335 temp
= tcg_temp_new();
5336 temp2
= tcg_temp_new();
5338 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5339 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
5340 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
5341 tcg_gen_neg_tl(temp
, cpu_gpr_d
[r3
+1]);
5342 /* use cpu_PSW_AV to compare against 0 */
5343 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, cpu_gpr_d
[r3
+1], cpu_PSW_AV
,
5344 temp
, cpu_gpr_d
[r3
+1]);
5345 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
5346 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
5347 temp2
, cpu_gpr_d
[r2
]);
5348 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
5350 /* overflow = (D[b] == 0) */
5351 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
5353 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5355 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5357 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 8);
5358 tcg_gen_shli_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 24);
5359 tcg_gen_mov_tl(cpu_gpr_d
[r3
+1], temp
);
5361 tcg_temp_free(temp
);
5362 tcg_temp_free(temp2
);
5364 case OPC2_32_RR_DVINIT_H
:
5365 gen_dvinit_h(env
, cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
5368 case OPC2_32_RR_DVINIT_HU
:
5369 temp
= tcg_temp_new();
5370 temp2
= tcg_temp_new();
5372 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5373 if (!tricore_feature(env
, TRICORE_FEATURE_131
)) {
5374 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
5375 tcg_gen_neg_tl(temp
, cpu_gpr_d
[r3
+1]);
5376 /* use cpu_PSW_AV to compare against 0 */
5377 tcg_gen_movcond_tl(TCG_COND_LT
, temp
, cpu_gpr_d
[r3
+1], cpu_PSW_AV
,
5378 temp
, cpu_gpr_d
[r3
+1]);
5379 tcg_gen_neg_tl(temp2
, cpu_gpr_d
[r2
]);
5380 tcg_gen_movcond_tl(TCG_COND_LT
, temp2
, cpu_gpr_d
[r2
], cpu_PSW_AV
,
5381 temp2
, cpu_gpr_d
[r2
]);
5382 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_PSW_V
, temp
, temp2
);
5384 /* overflow = (D[b] == 0) */
5385 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
5387 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5389 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5391 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
5392 tcg_gen_shri_tl(cpu_gpr_d
[r3
+1], temp
, 16);
5393 tcg_gen_shli_tl(cpu_gpr_d
[r3
], temp
, 16);
5394 tcg_temp_free(temp
);
5395 tcg_temp_free(temp2
);
5397 case OPC2_32_RR_DVINIT
:
5398 temp
= tcg_temp_new();
5399 temp2
= tcg_temp_new();
5400 /* overflow = ((D[b] == 0) ||
5401 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
5402 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp
, cpu_gpr_d
[r2
], 0xffffffff);
5403 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r1
], 0x80000000);
5404 tcg_gen_and_tl(temp
, temp
, temp2
);
5405 tcg_gen_setcondi_tl(TCG_COND_EQ
, temp2
, cpu_gpr_d
[r2
], 0);
5406 tcg_gen_or_tl(cpu_PSW_V
, temp
, temp2
);
5407 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5409 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5411 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5413 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5414 /* sign extend to high reg */
5415 tcg_gen_sari_tl(cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], 31);
5416 tcg_temp_free(temp
);
5417 tcg_temp_free(temp2
);
5419 case OPC2_32_RR_DVINIT_U
:
5420 /* overflow = (D[b] == 0) */
5421 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_PSW_V
, cpu_gpr_d
[r2
], 0);
5422 tcg_gen_shli_tl(cpu_PSW_V
, cpu_PSW_V
, 31);
5424 tcg_gen_or_tl(cpu_PSW_SV
, cpu_PSW_SV
, cpu_PSW_V
);
5426 tcg_gen_movi_tl(cpu_PSW_AV
, 0);
5428 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5429 /* zero extend to high reg*/
5430 tcg_gen_movi_tl(cpu_gpr_d
[r3
+1], 0);
5432 case OPC2_32_RR_PARITY
:
5433 gen_helper_parity(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
]);
5435 case OPC2_32_RR_UNPACK
:
5436 gen_unpack(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
]);
5442 static void decode_rr1_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
5450 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
5451 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
5452 r3
= MASK_OP_RR1_D(ctx
->opcode
);
5453 n
= tcg_const_i32(MASK_OP_RR1_N(ctx
->opcode
));
5454 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
5457 case OPC2_32_RR1_MUL_H_32_LL
:
5458 temp64
= tcg_temp_new_i64();
5459 GEN_HELPER_LL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5460 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5461 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5462 tcg_temp_free_i64(temp64
);
5464 case OPC2_32_RR1_MUL_H_32_LU
:
5465 temp64
= tcg_temp_new_i64();
5466 GEN_HELPER_LU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5467 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5468 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5469 tcg_temp_free_i64(temp64
);
5471 case OPC2_32_RR1_MUL_H_32_UL
:
5472 temp64
= tcg_temp_new_i64();
5473 GEN_HELPER_UL(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5474 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5475 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5476 tcg_temp_free_i64(temp64
);
5478 case OPC2_32_RR1_MUL_H_32_UU
:
5479 temp64
= tcg_temp_new_i64();
5480 GEN_HELPER_UU(mul_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5481 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5482 gen_calc_usb_mul_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1]);
5483 tcg_temp_free_i64(temp64
);
5485 case OPC2_32_RR1_MULM_H_64_LL
:
5486 temp64
= tcg_temp_new_i64();
5487 GEN_HELPER_LL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5488 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5490 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5492 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5493 tcg_temp_free_i64(temp64
);
5495 case OPC2_32_RR1_MULM_H_64_LU
:
5496 temp64
= tcg_temp_new_i64();
5497 GEN_HELPER_LU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5498 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5500 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5502 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5503 tcg_temp_free_i64(temp64
);
5505 case OPC2_32_RR1_MULM_H_64_UL
:
5506 temp64
= tcg_temp_new_i64();
5507 GEN_HELPER_UL(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5508 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5510 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5512 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5513 tcg_temp_free_i64(temp64
);
5515 case OPC2_32_RR1_MULM_H_64_UU
:
5516 temp64
= tcg_temp_new_i64();
5517 GEN_HELPER_UU(mulm_h
, temp64
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5518 tcg_gen_extr_i64_i32(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], temp64
);
5520 tcg_gen_movi_tl(cpu_PSW_V
, 0);
5522 tcg_gen_mov_tl(cpu_PSW_AV
, cpu_PSW_V
);
5523 tcg_temp_free_i64(temp64
);
5526 case OPC2_32_RR1_MULR_H_16_LL
:
5527 GEN_HELPER_LL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5528 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5530 case OPC2_32_RR1_MULR_H_16_LU
:
5531 GEN_HELPER_LU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5532 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5534 case OPC2_32_RR1_MULR_H_16_UL
:
5535 GEN_HELPER_UL(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5536 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5538 case OPC2_32_RR1_MULR_H_16_UU
:
5539 GEN_HELPER_UU(mulr_h
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
);
5540 gen_calc_usb_mulr_h(cpu_gpr_d
[r3
]);
5546 static void decode_rr1_mulq(CPUTriCoreState
*env
, DisasContext
*ctx
)
5554 r1
= MASK_OP_RR1_S1(ctx
->opcode
);
5555 r2
= MASK_OP_RR1_S2(ctx
->opcode
);
5556 r3
= MASK_OP_RR1_D(ctx
->opcode
);
5557 n
= MASK_OP_RR1_N(ctx
->opcode
);
5558 op2
= MASK_OP_RR1_OP2(ctx
->opcode
);
5560 temp
= tcg_temp_new();
5561 temp2
= tcg_temp_new();
5564 case OPC2_32_RR1_MUL_Q_32
:
5565 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
], n
, 32);
5567 case OPC2_32_RR1_MUL_Q_64
:
5568 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
5571 case OPC2_32_RR1_MUL_Q_32_L
:
5572 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
5573 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
5575 case OPC2_32_RR1_MUL_Q_64_L
:
5576 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r2
]);
5577 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
5579 case OPC2_32_RR1_MUL_Q_32_U
:
5580 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
5581 gen_mul_q(cpu_gpr_d
[r3
], temp
, cpu_gpr_d
[r1
], temp
, n
, 16);
5583 case OPC2_32_RR1_MUL_Q_64_U
:
5584 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r2
], 16);
5585 gen_mul_q(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
], temp
, n
, 0);
5587 case OPC2_32_RR1_MUL_Q_32_LL
:
5588 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
5589 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
5590 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
5592 case OPC2_32_RR1_MUL_Q_32_UU
:
5593 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
5594 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
5595 gen_mul_q_16(cpu_gpr_d
[r3
], temp
, temp2
, n
);
5597 case OPC2_32_RR1_MULR_Q_32_L
:
5598 tcg_gen_ext16s_tl(temp
, cpu_gpr_d
[r1
]);
5599 tcg_gen_ext16s_tl(temp2
, cpu_gpr_d
[r2
]);
5600 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
5602 case OPC2_32_RR1_MULR_Q_32_U
:
5603 tcg_gen_sari_tl(temp
, cpu_gpr_d
[r1
], 16);
5604 tcg_gen_sari_tl(temp2
, cpu_gpr_d
[r2
], 16);
5605 gen_mulr_q(cpu_gpr_d
[r3
], temp
, temp2
, n
);
5608 tcg_temp_free(temp
);
5609 tcg_temp_free(temp2
);
5613 static void decode_rr2_mul(CPUTriCoreState
*env
, DisasContext
*ctx
)
5618 op2
= MASK_OP_RR2_OP2(ctx
->opcode
);
5619 r1
= MASK_OP_RR2_S1(ctx
->opcode
);
5620 r2
= MASK_OP_RR2_S2(ctx
->opcode
);
5621 r3
= MASK_OP_RR2_D(ctx
->opcode
);
5623 case OPC2_32_RR2_MUL_32
:
5624 gen_mul_i32s(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5626 case OPC2_32_RR2_MUL_64
:
5627 gen_mul_i64s(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
5630 case OPC2_32_RR2_MULS_32
:
5631 gen_helper_mul_ssov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
5634 case OPC2_32_RR2_MUL_U_64
:
5635 gen_mul_i64u(cpu_gpr_d
[r3
], cpu_gpr_d
[r3
+1], cpu_gpr_d
[r1
],
5638 case OPC2_32_RR2_MULS_U_32
:
5639 gen_helper_mul_suov(cpu_gpr_d
[r3
], cpu_env
, cpu_gpr_d
[r1
],
/*
 * Decode RRPW-format bit-field instructions: EXTR (signed extract),
 * EXTR.U (unsigned extract), IMASK (build insertion mask pair) and
 * INSERT.  pos/width are immediate bit-field position and length taken
 * from the instruction word.
 * NOTE(review): reconstructed from a mangled source dump; the
 * `pos + width <= 31` guards leave the destination unchanged for
 * out-of-range encodings, as the visible code shows.
 */
static void decode_rrpw_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int32_t pos, width;

    op2 = MASK_OP_RRPW_OP2(ctx->opcode);
    r1 = MASK_OP_RRPW_S1(ctx->opcode);
    r2 = MASK_OP_RRPW_S2(ctx->opcode);
    r3 = MASK_OP_RRPW_D(ctx->opcode);
    pos = MASK_OP_RRPW_POS(ctx->opcode);
    width = MASK_OP_RRPW_WIDTH(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRPW_EXTR:
        if (pos + width <= 31) {
            /* optimize special cases */
            if ((pos == 0) && (width == 8)) {
                tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
            } else if ((pos == 0) && (width == 16)) {
                tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
            } else {
                /* Shift the field up to bit 31, then arithmetic-shift
                   back down to sign-extend it. */
                tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1],
                                32 - pos - width);
                tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width);
            }
        }
        break;
    case OPC2_32_RRPW_EXTR_U:
        if (width == 0) {
            /* Zero-width extract yields 0 (avoids a 32-bit shift below). */
            tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
        } else {
            tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos);
            tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32-width));
        }
        break;
    case OPC2_32_RRPW_IMASK:
        if (pos + width <= 31) {
            /* d[r3+1] = mask of `width` ones at `pos`; d[r3] = value
               shifted into place for a later insert. */
            tcg_gen_movi_tl(cpu_gpr_d[r3+1], ((1u << width) - 1) << pos);
            tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
        }
        break;
    case OPC2_32_RRPW_INSERT:
        if (pos + width <= 31) {
            tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                               pos, width);
        }
        break;
    }
}
/*
 * Decode RRR-format conditional add/sub/select instructions.  The
 * condition operand is d[r3] (tested against zero); CADD/CSUB trigger on
 * non-zero, the ...N variants on zero.
 * NOTE(review): reconstructed from a mangled source dump; verify the
 * break/temp handling against the original file.
 */
static void decode_rrr_cond_select(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    TCGv temp;

    op2 = MASK_OP_RRR_OP2(ctx->opcode);
    r1 = MASK_OP_RRR_S1(ctx->opcode);
    r2 = MASK_OP_RRR_S2(ctx->opcode);
    r3 = MASK_OP_RRR_S3(ctx->opcode);
    r4 = MASK_OP_RRR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR_CADD:
        gen_cond_add(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
                     cpu_gpr_d[r4], cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CADDN:
        gen_cond_add(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CSUB:
        gen_cond_sub(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CSUBN:
        gen_cond_sub(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_SEL:
        /* d[r4] = (d[r3] != 0) ? d[r1] : d[r2] */
        temp = tcg_const_i32(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], cpu_gpr_d[r2]);
        tcg_temp_free(temp);
        break;
    case OPC2_32_RRR_SELN:
        /* d[r4] = (d[r3] == 0) ? d[r1] : d[r2] */
        temp = tcg_const_i32(0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], cpu_gpr_d[r2]);
        tcg_temp_free(temp);
        break;
    }
}
/*
 * Decode RRR-format divide-step and index min/max instructions.  All of
 * these operate on the 64-bit register pairs d[r4]/d[r4+1] and
 * d[r3]/d[r3+1], dispatched to helpers via GEN_HELPER_RRR.  PACK also
 * reads the cached PSW carry flag.
 * NOTE(review): reconstructed from a mangled source dump.
 */
static void decode_rrr_divide(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;

    op2 = MASK_OP_RRR_OP2(ctx->opcode);
    r1 = MASK_OP_RRR_S1(ctx->opcode);
    r2 = MASK_OP_RRR_S2(ctx->opcode);
    r3 = MASK_OP_RRR_S3(ctx->opcode);
    r4 = MASK_OP_RRR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR_DVADJ:
        GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_DVSTEP:
        GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_DVSTEP_U:
        GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMAX:
        GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMAX_U:
        GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMIN:
        GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMIN_U:
        GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_PACK:
        /* PACK consumes the carry flag cached in cpu_PSW_C. */
        gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3],
                        cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
        break;
    }
}
/*
 * Decode RRR2-format multiply-add instructions: d[r4](/d[r4+1]) =
 * d[r3](/d[r3+1]) + d[r1] * d[r2], in 32/64-bit, signed/unsigned and
 * saturating flavours.  Saturating 32-bit forms go through helpers that
 * also update PSW via cpu_env.
 * NOTE(review): reconstructed from a mangled source dump.
 */
static void decode_rrr2_madd(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4;

    op2 = MASK_OP_RRR2_OP2(ctx->opcode);
    r1 = MASK_OP_RRR2_S1(ctx->opcode);
    r2 = MASK_OP_RRR2_S2(ctx->opcode);
    r3 = MASK_OP_RRR2_S3(ctx->opcode);
    r4 = MASK_OP_RRR2_D(ctx->opcode);
    switch (op2) {
    case OPC2_32_RRR2_MADD_32:
        gen_madd32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADD_64:
        gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_32:
        gen_helper_madd32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_64:
        gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADD_U_64:
        gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_U_32:
        gen_helper_madd32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_U_64:
        gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    }
}
/*
 * Decode RRR2-format multiply-subtract instructions: d[r4](/d[r4+1]) =
 * d[r3](/d[r3+1]) - d[r1] * d[r2].  Mirror image of decode_rrr2_madd:
 * 32/64-bit, signed/unsigned and saturating variants.
 * NOTE(review): reconstructed from a mangled source dump.
 */
static void decode_rrr2_msub(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4;

    op2 = MASK_OP_RRR2_OP2(ctx->opcode);
    r1 = MASK_OP_RRR2_S1(ctx->opcode);
    r2 = MASK_OP_RRR2_S2(ctx->opcode);
    r3 = MASK_OP_RRR2_S3(ctx->opcode);
    r4 = MASK_OP_RRR2_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR2_MSUB_32:
        gen_msub32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUB_64:
        gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_32:
        gen_helper_msub32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_64:
        gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUB_U_64:
        gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_U_32:
        gen_helper_msub32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_U_64:
        gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    }
}
/*
 * Decode RRR1-format packed half-word multiply-accumulate (MADD.H
 * family).  op2 selects plain (MADD.H), saturating (MADDS.H), Q-format
 * most-significant-word (MADDM.H / MADDMS.H) and rounding
 * (MADDR.H / MADDRS.H) variants; MODE_LL/LU/UL/UU selects which
 * half-words of d[r1]/d[r2] feed each multiplier, and n is the Q-format
 * left-shift field.
 * NOTE(review): switch/break structure reconstructed from a mangled
 * source dump.
 */
static void decode_rrr1_madd(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;

    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 = MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR1_MADD_H_LL:
        gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADD_H_LU:
        gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADD_H_UL:
        gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADD_H_UU:
        gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDS_H_LL:
        gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDS_H_LU:
        gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDS_H_UL:
        gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDS_H_UU:
        gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDM_H_LL:
        gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDM_H_LU:
        gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDM_H_UL:
        gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDM_H_UU:
        gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDMS_H_LL:
        gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDMS_H_LU:
        gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDMS_H_UL:
        gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDMS_H_UU:
        gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDR_H_LL:
        /* Rounding variants produce a single 32-bit result in d[r4]. */
        gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDR_H_LU:
        gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDR_H_UL:
        gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDR_H_UU:
        gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDRS_H_LL:
        gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDRS_H_LU:
        gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDRS_H_UL:
        gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDRS_H_UU:
        gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_UU);
        break;
    }
}
/*
 * Decode RRR1-format Q-format multiply-accumulate (MADD.Q family).
 * The _L/_U suffixes select the sign-extended low or high half-word of
 * d[r2] (computed into `temp`); _LL/_UU multiply matching half-words of
 * both d[r1] and d[r2] (`temp`/`temp2`).  MADDS.* are the saturating
 * forms; MADDR.*/MADDRS.* round to 16-bit results.  `n` is the Q-format
 * shift field.
 * NOTE(review): switch/break structure and dropped trailing call
 * arguments reconstructed from a mangled source dump.
 */
static void decode_rrr1_maddq_h(CPUTriCoreState *env, DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;
    TCGv temp, temp2;

    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 = MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    /* temp starts as the constant n but is freely reused as a scratch
       register for extracted half-words below. */
    temp = tcg_const_i32(n);
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RRR1_MADD_Q_32:
        gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                     cpu_gpr_d[r2], n, 32, env);
        break;
    case OPC2_32_RRR1_MADD_Q_64:
        gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                     n, env);
        break;
    case OPC2_32_RRR1_MADD_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                     temp, n, 16, env);
        break;
    case OPC2_32_RRR1_MADD_Q_64_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                     n, env);
        break;
    case OPC2_32_RRR1_MADD_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                     temp, n, 16, env);
        break;
    case OPC2_32_RRR1_MADD_Q_64_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                     n, env);
        break;
    case OPC2_32_RRR1_MADD_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADD_Q_64_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADD_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADD_Q_64_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDS_Q_32:
        gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, 32);
        break;
    case OPC2_32_RRR1_MADDS_Q_64:
        gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n);
        break;
    case OPC2_32_RRR1_MADDS_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      temp, n, 16);
        break;
    case OPC2_32_RRR1_MADDS_Q_64_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                      n);
        break;
    case OPC2_32_RRR1_MADDS_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      temp, n, 16);
        break;
    case OPC2_32_RRR1_MADDS_Q_64_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                      n);
        break;
    case OPC2_32_RRR1_MADDS_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDS_Q_64_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                        cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDS_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDS_Q_64_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                        cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDR_H_64_UL:
        gen_maddr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
                      cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
        break;
    case OPC2_32_RRR1_MADDRS_H_64_UL:
        gen_maddr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
                       cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
        break;
    case OPC2_32_RRR1_MADDR_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDR_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDRS_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDRS_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    }
    tcg_temp_free(temp);
    tcg_temp_free(temp2);
}
/*
 * Top-level decoder for 32-bit TriCore instructions.  Dispatches on the
 * major opcode (op1) to per-format sub-decoders; a few simple formats
 * (ABS STOREQ/LD.Q/LEA, ABSB ST.T, RCRR INSERT, RRPW DEXTR and the
 * branch formats) are handled inline.
 * NOTE(review): switch/break scaffolding and the B-format case-label
 * group (J/JA/JL/JLA alongside the visible CALL/CALLA) reconstructed
 * from a mangled source dump — verify against the original file.
 */
static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
{
    int op1;
    int32_t r1, r2, r3;
    int32_t address, const16;
    int8_t b, const4;
    int32_t bpos;
    TCGv temp, temp2, temp3;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle JNZ.T opcode only being 7 bit long */
    if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
        op1 = OPCM_32_BRN_JTT;
    }

    switch (op1) {
/* ABS-format */
    case OPCM_32_ABS_LDW:
        decode_abs_ldw(env, ctx);
        break;
    case OPCM_32_ABS_LDB:
        decode_abs_ldb(env, ctx);
        break;
    case OPCM_32_ABS_LDMST_SWAP:
        decode_abs_ldst_swap(env, ctx);
        break;
    case OPCM_32_ABS_LDST_CONTEXT:
        decode_abs_ldst_context(env, ctx);
        break;
    case OPCM_32_ABS_STORE:
        decode_abs_store(env, ctx);
        break;
    case OPCM_32_ABS_STOREB_H:
        decode_abs_storeb_h(env, ctx);
        break;
    case OPC1_32_ABS_STOREQ:
        /* ST.Q: store the upper half-word of d[r1] to an absolute EA. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);

        tcg_temp_free(temp2);
        tcg_temp_free(temp);
        break;
    case OPC1_32_ABS_LD_Q:
        /* LD.Q: load a half-word into the upper half of d[r1]. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(EA_ABS_FORMAT(address));

        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);

        tcg_temp_free(temp);
        break;
    case OPC1_32_ABS_LEA:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
        break;
/* ABSB-format */
    case OPC1_32_ABSB_ST_T:
        /* ST.T: read-modify-write a single bit in a byte in memory. */
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        b = MASK_OP_ABSB_B(ctx->opcode);
        bpos = MASK_OP_ABSB_BPOS(ctx->opcode);

        temp = tcg_const_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
        tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
        tcg_gen_ori_tl(temp2, temp2, (b << bpos));
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        break;
/* B-format */
    case OPC1_32_B_CALL:
    case OPC1_32_B_CALLA:
    case OPC1_32_B_J:
    case OPC1_32_B_JA:
    case OPC1_32_B_JL:
    case OPC1_32_B_JLA:
        address = MASK_OP_B_DISP24_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, 0, address);
        break;
/* Bit-format */
    case OPCM_32_BIT_ANDACC:
        decode_bit_andacc(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T1:
        decode_bit_logical_t(env, ctx);
        break;
    case OPCM_32_BIT_INSERT:
        decode_bit_insert(env, ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T2:
        decode_bit_logical_t2(env, ctx);
        break;
    case OPCM_32_BIT_ORAND:
        decode_bit_orand(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC1:
        decode_bit_sh_logic1(env, ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC2:
        decode_bit_sh_logic2(env, ctx);
        break;
/* BO-format */
    case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
        decode_bo_addrmode_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
        decode_bo_addrmode_ld_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ld_bitreverse_circular(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
        decode_bo_addrmode_stctx_post_pre_base(env, ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx);
        break;
/* BOL-format */
    case OPC1_32_BOL_LD_A_LONGOFF:
    case OPC1_32_BOL_LD_W_LONGOFF:
    case OPC1_32_BOL_LEA_LONGOFF:
    case OPC1_32_BOL_ST_W_LONGOFF:
    case OPC1_32_BOL_ST_A_LONGOFF:
    case OPC1_32_BOL_LD_B_LONGOFF:
    case OPC1_32_BOL_LD_BU_LONGOFF:
    case OPC1_32_BOL_LD_H_LONGOFF:
    case OPC1_32_BOL_LD_HU_LONGOFF:
    case OPC1_32_BOL_ST_B_LONGOFF:
    case OPC1_32_BOL_ST_H_LONGOFF:
        decode_bol_opc(env, ctx, op1);
        break;
/* BRC-format */
    case OPCM_32_BRC_EQ_NEQ:
    case OPCM_32_BRC_GE:
    case OPCM_32_BRC_JLT:
    case OPCM_32_BRC_JNE:
        const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
        address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRC_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, const4, address);
        break;
/* BRN-format */
    case OPCM_32_BRN_JTT:
        address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRN_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, address);
        break;
/* BRR-format */
    case OPCM_32_BRR_EQ_NEQ:
    case OPCM_32_BRR_ADDR_EQ_NEQ:
    case OPCM_32_BRR_GE:
    case OPCM_32_BRR_JLT:
    case OPCM_32_BRR_JNE:
    case OPCM_32_BRR_JNZ:
    case OPCM_32_BRR_LOOP:
        address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
        r2 = MASK_OP_BRR_S2(ctx->opcode);
        r1 = MASK_OP_BRR_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, r2, 0, address);
        break;
/* RC-format */
    case OPCM_32_RC_LOGICAL_SHIFT:
        decode_rc_logical_shift(env, ctx);
        break;
    case OPCM_32_RC_ACCUMULATOR:
        decode_rc_accumulator(env, ctx);
        break;
    case OPCM_32_RC_SERVICEROUTINE:
        decode_rc_serviceroutine(env, ctx);
        break;
    case OPCM_32_RC_MUL:
        decode_rc_mul(env, ctx);
        break;
/* RCPW-format */
    case OPCM_32_RCPW_MASK_INSERT:
        decode_rcpw_insert(env, ctx);
        break;
/* RCRR-format */
    case OPC1_32_RCRR_INSERT:
        r1 = MASK_OP_RCRR_S1(ctx->opcode);
        r2 = MASK_OP_RCRR_S3(ctx->opcode);
        r3 = MASK_OP_RCRR_D(ctx->opcode);
        const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
        temp = tcg_const_i32(const16);
        temp2 = tcg_temp_new(); /* width*/
        temp3 = tcg_temp_new(); /* pos */

        /* Width and position come from the register pair d[r3]/d[r3+1],
           each masked to 0..31. */
        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);

        gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);

        tcg_temp_free(temp);
        tcg_temp_free(temp2);
        tcg_temp_free(temp3);
        break;
/* RCRW-format */
    case OPCM_32_RCRW_MASK_INSERT:
        decode_rcrw_insert(env, ctx);
        break;
/* RCR-format */
    case OPCM_32_RCR_COND_SELECT:
        decode_rcr_cond_select(env, ctx);
        break;
    case OPCM_32_RCR_MADD:
        decode_rcr_madd(env, ctx);
        break;
    case OPCM_32_RCR_MSUB:
        decode_rcr_msub(env, ctx);
        break;
/* RLC-format */
    case OPC1_32_RLC_ADDI:
    case OPC1_32_RLC_ADDIH:
    case OPC1_32_RLC_ADDIH_A:
    case OPC1_32_RLC_MFCR:
    case OPC1_32_RLC_MOV:
    case OPC1_32_RLC_MOV_64:
    case OPC1_32_RLC_MOV_U:
    case OPC1_32_RLC_MOV_H:
    case OPC1_32_RLC_MOVH_A:
    case OPC1_32_RLC_MTCR:
        decode_rlc_opc(env, ctx, op1);
        break;
/* RR-format */
    case OPCM_32_RR_ACCUMULATOR:
        decode_rr_accumulator(env, ctx);
        break;
    case OPCM_32_RR_LOGICAL_SHIFT:
        decode_rr_logical_shift(env, ctx);
        break;
    case OPCM_32_RR_ADDRESS:
        decode_rr_address(env, ctx);
        break;
    case OPCM_32_RR_IDIRECT:
        decode_rr_idirect(env, ctx);
        break;
    case OPCM_32_RR_DIVIDE:
        decode_rr_divide(env, ctx);
        break;
/* RR1-format */
    case OPCM_32_RR1_MUL:
        decode_rr1_mul(env, ctx);
        break;
    case OPCM_32_RR1_MULQ:
        decode_rr1_mulq(env, ctx);
        break;
/* RR2-format */
    case OPCM_32_RR2_MUL:
        decode_rr2_mul(env, ctx);
        break;
/* RRPW-format */
    case OPCM_32_RRPW_EXTRACT_INSERT:
        decode_rrpw_extract_insert(env, ctx);
        break;
    case OPC1_32_RRPW_DEXTR:
        r1 = MASK_OP_RRPW_S1(ctx->opcode);
        r2 = MASK_OP_RRPW_S2(ctx->opcode);
        r3 = MASK_OP_RRPW_D(ctx->opcode);
        const16 = MASK_OP_RRPW_POS(ctx->opcode);
        if (r1 == r2) {
            /* Same source registers degenerate to a rotate. */
            tcg_gen_rotli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], const16);
        } else {
            temp = tcg_temp_new();
            tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], const16);
            tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 32 - const16);
            tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
            tcg_temp_free(temp);
        }
        break;
/* RRR-format */
    case OPCM_32_RRR_COND_SELECT:
        decode_rrr_cond_select(env, ctx);
        break;
    case OPCM_32_RRR_DIVIDE:
        decode_rrr_divide(env, ctx);
        break;
/* RRR2-format */
    case OPCM_32_RRR2_MADD:
        decode_rrr2_madd(env, ctx);
        break;
    case OPCM_32_RRR2_MSUB:
        decode_rrr2_msub(env, ctx);
        break;
/* RRR1-format */
    case OPCM_32_RRR1_MADD:
        decode_rrr1_madd(env, ctx);
        break;
    case OPCM_32_RRR1_MADDQ_H:
        decode_rrr1_maddq_h(env, ctx);
        break;
    }
}
6450 static void decode_opc(CPUTriCoreState
*env
, DisasContext
*ctx
, int *is_branch
)
6452 /* 16-Bit Instruction */
6453 if ((ctx
->opcode
& 0x1) == 0) {
6454 ctx
->next_pc
= ctx
->pc
+ 2;
6455 decode_16Bit_opc(env
, ctx
);
6456 /* 32-Bit Instruction */
6458 ctx
->next_pc
= ctx
->pc
+ 4;
6459 decode_32Bit_opc(env
, ctx
);
/*
 * Translate a block of guest code starting at tb->pc into TCG ops,
 * stopping when a decoder sets ctx.bstate != BS_NONE or the op buffer
 * fills up.  When search_pc is set, only diagnostic logging differs
 * (tb->size/icount are not written).
 * NOTE(review): this function suffered the heaviest line loss in the
 * mangled dump — the init of num_insns/pc_start/ctx fields and the
 * exact exit paths are reconstructed and must be verified against the
 * original file.
 */
static void
gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUTriCoreState *env = &cpu->env;
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns = 0;

    if (search_pc) {
        qemu_log("search pc %d\n", search_pc);
    }

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.saved_pc = -1;
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.bstate = BS_NONE;
    ctx.mem_idx = cpu_mmu_index(env);

    tcg_clear_temp_count();
    gen_tb_start(tb);
    while (ctx.bstate == BS_NONE) {
        ctx.opcode = cpu_ldl_code(env, ctx.pc);
        decode_opc(env, &ctx, 0);

        num_insns++;

        if (tcg_op_buf_full()) {
            /* TCG op buffer exhausted: save the PC and stop the block. */
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        if (singlestep) {
            gen_save_pc(ctx.next_pc);
            tcg_gen_exit_tb(0);
            break;
        }
        ctx.pc = ctx.next_pc;
    }

    gen_tb_end(tb, num_insns);
    if (search_pc) {
        printf("done_generating search pc\n");
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
    if (tcg_check_temp_count()) {
        /* A decoder leaked a TCG temporary for this instruction. */
        printf("LEAK at %08x\n", env->PC);
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
/* Public entry point: translate a TB (normal path, no PC search). */
void
gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
}
/* Public entry point: translate a TB in search-pc mode (used when
 * restoring CPU state after an exception inside a TB). */
void
gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
}
/* Restore the guest PC from the per-op PC table recorded during
 * translation (indexed by the faulting op's position). */
void
restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos)
{
    env->PC = tcg_ctx.gen_opc_pc[pc_pos];
}
/*
 * Architectural reset of the CPU state.
 * NOTE(review): the body was dropped by the mangled dump; the PSW reset
 * value below is reconstructed — confirm against the original file and
 * the TriCore architecture manual.
 */
void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
    env->PSW = 0xb80;
}
/* Create TCG globals backed by the core special function registers
 * (PCXI, PSW, PC, ICR) in CPUTriCoreState. */
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
                                  offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(TCG_AREG0,
                                offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUTriCoreState, ICR), "ICR");
}
6568 void tricore_tcg_init(void)
6575 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
6577 for (i
= 0 ; i
< 16 ; i
++) {
6578 cpu_gpr_a
[i
] = tcg_global_mem_new(TCG_AREG0
,
6579 offsetof(CPUTriCoreState
, gpr_a
[i
]),
6582 for (i
= 0 ; i
< 16 ; i
++) {
6583 cpu_gpr_d
[i
] = tcg_global_mem_new(TCG_AREG0
,
6584 offsetof(CPUTriCoreState
, gpr_d
[i
]),
6587 tricore_tcg_init_csfr();
6588 /* init PSW flag cache */
6589 cpu_PSW_C
= tcg_global_mem_new(TCG_AREG0
,
6590 offsetof(CPUTriCoreState
, PSW_USB_C
),
6592 cpu_PSW_V
= tcg_global_mem_new(TCG_AREG0
,
6593 offsetof(CPUTriCoreState
, PSW_USB_V
),
6595 cpu_PSW_SV
= tcg_global_mem_new(TCG_AREG0
,
6596 offsetof(CPUTriCoreState
, PSW_USB_SV
),
6598 cpu_PSW_AV
= tcg_global_mem_new(TCG_AREG0
,
6599 offsetof(CPUTriCoreState
, PSW_USB_AV
),
6601 cpu_PSW_SAV
= tcg_global_mem_new(TCG_AREG0
,
6602 offsetof(CPUTriCoreState
, PSW_USB_SAV
),