/*
 * TriCore emulation for qemu: main translation routines.
 *
 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "qemu/qemu-print.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "tricore-opcodes.h"
#include "exec/translator.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H
#define DISAS_EXIT        DISAS_TARGET_0
#define DISAS_EXIT_UPDATE DISAS_TARGET_1
#define DISAS_JUMP        DISAS_TARGET_2
static TCGv cpu_gpr_a[16];
static TCGv cpu_gpr_d[16];

static TCGv cpu_PSW_C;
static TCGv cpu_PSW_V;
static TCGv cpu_PSW_SV;
static TCGv cpu_PSW_AV;
static TCGv cpu_PSW_SAV;
static const char *regnames_a[] = {
    "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "a8", "a9", "sp", "a11",
    "a12", "a13", "a14", "a15",
};

static const char *regnames_d[] = {
    "d0", "d1", "d2", "d3", "d4", "d5",
    "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15",
};
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc_succ_insn;
    /* Routine used to access memory */
    int mem_idx;
    int priv;
    uint64_t features;
    uint32_t icr_ie_mask, icr_ie_offset;
} DisasContext;
static int has_feature(DisasContext *ctx, int feature)
{
    return (ctx->features & (1ULL << feature)) != 0;
}
void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    TriCoreCPU *cpu = TRICORE_CPU(cs);
    CPUTriCoreState *env = &cpu->env;
    uint32_t psw;
    int i;

    psw = psw_read(env);

    qemu_fprintf(f, "PC: " TARGET_FMT_lx, env->PC);
    qemu_fprintf(f, " PSW: " TARGET_FMT_lx, psw);
    qemu_fprintf(f, " ICR: " TARGET_FMT_lx, env->ICR);
    qemu_fprintf(f, "\nPCXI: " TARGET_FMT_lx, env->PCXI);
    qemu_fprintf(f, " FCX: " TARGET_FMT_lx, env->FCX);
    qemu_fprintf(f, " LCX: " TARGET_FMT_lx, env->LCX);

    for (i = 0; i < 16; ++i) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, "\nGPR A%02d:", i);
        }
        qemu_fprintf(f, " " TARGET_FMT_lx, env->gpr_a[i]);
    }
    for (i = 0; i < 16; ++i) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, "\nGPR D%02d:", i);
        }
        qemu_fprintf(f, " " TARGET_FMT_lx, env->gpr_d[i]);
    }
    qemu_fprintf(f, "\n");
}
/*
 * Functions to generate micro-ops
 */

/* Macros for generating helpers */

#define gen_helper_1arg(name, arg) do {                    \
    TCGv_i32 helper_tmp = tcg_constant_i32(arg);           \
    gen_helper_##name(tcg_env, helper_tmp);                \
} while (0)

#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do {       \
    TCGv arg00 = tcg_temp_new();                           \
    TCGv arg01 = tcg_temp_new();                           \
    TCGv arg11 = tcg_temp_new();                           \
    tcg_gen_sari_tl(arg00, arg0, 16);                      \
    tcg_gen_ext16s_tl(arg01, arg0);                        \
    tcg_gen_ext16s_tl(arg11, arg1);                        \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
} while (0)

#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do {       \
    TCGv arg00 = tcg_temp_new();                           \
    TCGv arg01 = tcg_temp_new();                           \
    TCGv arg10 = tcg_temp_new();                           \
    TCGv arg11 = tcg_temp_new();                           \
    tcg_gen_sari_tl(arg00, arg0, 16);                      \
    tcg_gen_ext16s_tl(arg01, arg0);                        \
    tcg_gen_sari_tl(arg11, arg1, 16);                      \
    tcg_gen_ext16s_tl(arg10, arg1);                        \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
} while (0)

#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do {       \
    TCGv arg00 = tcg_temp_new();                           \
    TCGv arg01 = tcg_temp_new();                           \
    TCGv arg10 = tcg_temp_new();                           \
    TCGv arg11 = tcg_temp_new();                           \
    tcg_gen_sari_tl(arg00, arg0, 16);                      \
    tcg_gen_ext16s_tl(arg01, arg0);                        \
    tcg_gen_sari_tl(arg10, arg1, 16);                      \
    tcg_gen_ext16s_tl(arg11, arg1);                        \
    gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
} while (0)

#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do {       \
    TCGv arg00 = tcg_temp_new();                           \
    TCGv arg01 = tcg_temp_new();                           \
    TCGv arg11 = tcg_temp_new();                           \
    tcg_gen_sari_tl(arg01, arg0, 16);                      \
    tcg_gen_ext16s_tl(arg00, arg0);                        \
    tcg_gen_sari_tl(arg11, arg1, 16);                      \
    gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
} while (0)

#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do {  \
    TCGv_i64 ret = tcg_temp_new_i64();                     \
    TCGv_i64 arg1 = tcg_temp_new_i64();                    \
                                                           \
    tcg_gen_concat_i32_i64(arg1, al1, ah1);                \
    gen_helper_##name(ret, arg1, arg2);                    \
    tcg_gen_extr_i64_i32(rl, rh, ret);                     \
} while (0)

#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do {       \
    TCGv_i64 ret = tcg_temp_new_i64();                     \
                                                           \
    gen_helper_##name(ret, tcg_env, arg1, arg2);           \
    tcg_gen_extr_i64_i32(rl, rh, ret);                     \
} while (0)
#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
                           ((offset & 0x0fffff) << 1))

/* When two 32-bit registers are used as one 64-bit register, the first
   register number needs to be even. Otherwise we trap. */
static inline void generate_trap(DisasContext *ctx, int class, int tin);
#define CHECK_REG_PAIR(reg) do {                      \
    if (reg & 0x1) {                                  \
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
    }                                                 \
} while (0)
/* Functions for load/save to/from memory */

static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, MemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, con);
    tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
}

static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
                                 int16_t con, MemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, con);
    tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
}
static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    tcg_gen_concat_i32_i64(temp, rl, rh);
    tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEUQ);
}

static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, base, con);
    gen_st_2regs_64(rh, rl, temp, ctx);
}

static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEUQ);
    /* write back to two 32 bit regs */
    tcg_gen_extr_i64_i32(rl, rh, temp);
}

static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
                                DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, base, con);
    gen_ld_2regs_64(rh, rl, temp, ctx);
}
static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           MemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, off);
    tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
    tcg_gen_mov_tl(r2, temp);
}

static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
                           MemOp mop)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_addi_tl(temp, r2, off);
    tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
    tcg_gen_mov_tl(r2, temp);
}
/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    CHECK_REG_PAIR(ereg);
    /* temp = M(EA, word) */
    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    /* temp = temp & ~E[a][63:32] */
    tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg + 1]);
    /* temp2 = E[a][31:0] & E[a][63:32] */
    tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg + 1]);
    /* temp = temp | temp2 */
    tcg_gen_or_tl(temp, temp, temp2);
    /* M(EA, word) = temp */
    tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);
}
/* tmp = M(EA, word);
   M(EA, word) = D[a];
   D[a] = tmp; */
static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
}
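/* tmp = M(EA, word);
   M(EA, word) = (tmp == E[a][63:32]) ? E[a][31:0] : tmp;
   D[a] = tmp; (compare-and-swap semantics) */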
static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_movcond_tl(TCG_COND_EQ, temp2, cpu_gpr_d[reg + 1], temp,
                       cpu_gpr_d[reg], temp);
    tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
}
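/* tmp = M(EA, word);
   M(EA, word) = (E[a][31:0] & E[a][63:32]) | (tmp & ~E[a][63:32]);
   D[a] = tmp; (masked swap semantics) */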
static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();

    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_and_tl(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg + 1]);
    tcg_gen_andc_tl(temp3, temp, cpu_gpr_d[reg + 1]);
    tcg_gen_or_tl(temp2, temp2, temp3);
    tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
}
/* We generate loads and stores to core special function registers (csfr)
   through the functions gen_mfcr and gen_mtcr. To handle access permissions,
   we use three macros R, A and E, which allow read-only, all and endinit
   protected access. These macros also specify in which ISA version the csfr
   was introduced. */
#define R(ADDRESS, REG, FEATURE)                                         \
    case ADDRESS:                                                        \
        if (has_feature(ctx, FEATURE)) {                                 \
            tcg_gen_ld_tl(ret, tcg_env, offsetof(CPUTriCoreState, REG)); \
        }                                                                \
        break;
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset)
{
    /* since we're caching PSW make this a special case */
    if (offset == 0xfe04) {
        gen_helper_psw_read(ret, tcg_env);
    } else {
        switch (offset) {
#include "csfr.h.inc"
        }
    }
}
#undef R
#undef A
#undef E
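/*
 * Illustrative example (not part of csfr.h.inc itself): an entry such as
 * A(0xfe00, PCXI, TRICORE_FEATURE_13) would expand on the read side above
 * roughly to
 *     case 0xfe00:
 *         if (has_feature(ctx, TRICORE_FEATURE_13)) {
 *             tcg_gen_ld_tl(ret, tcg_env, offsetof(CPUTriCoreState, PCXI));
 *         }
 *         break;
 * so gen_mfcr/gen_mtcr are just big switches over the CSFR offset.
 */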
#define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
                                    since no exception occurs */
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)                \
    case ADDRESS:                                                        \
        if (has_feature(ctx, FEATURE)) {                                 \
            tcg_gen_st_tl(r1, tcg_env, offsetof(CPUTriCoreState, REG));  \
        }                                                                \
        break;
/* Endinit protected registers
   TODO: Since the endinit bit is in a register of a not yet implemented
   watchdog device, we handle endinit protected registers like
   all-access registers for now. */
#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
static inline void gen_mtcr(DisasContext *ctx, TCGv r1,
                            int32_t offset)
{
    if (ctx->priv == TRICORE_PRIV_SM) {
        /* since we're caching PSW make this a special case */
        if (offset == 0xfe04) {
            gen_helper_psw_write(tcg_env, r1);
            ctx->base.is_jmp = DISAS_EXIT_UPDATE;
        } else {
            switch (offset) {
#include "csfr.h.inc"
            }
        }
    } else {
        generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
    }
}
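/*
 * PSW flag convention used by the arithmetic helpers below:
 * V lives in bit 31 of cpu_PSW_V (signed overflow of the operation),
 * AV ("advanced overflow") is result[31] ^ result[30] kept in bit 31 of
 * cpu_PSW_AV, and SV/SAV are the sticky versions, or-accumulated over time.
 */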
/* Functions for arithmetic instructions  */

static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();
    /* Addition and set V/SV bits */
    tcg_gen_add_tl(result, r1, r2);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}
static inline void
gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();

    tcg_gen_add_i64(result, r1, r2);
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_andc_i64(t1, t1, t0);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits */
    tcg_gen_extrh_i64_i32(temp, result);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);
}
static inline void
gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, void(*op1)(TCGv, TCGv, TCGv),
               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv temp4 = tcg_temp_new();

    (*op1)(temp, r1_low, r2);
    tcg_gen_xor_tl(temp2, temp, r1_low);
    tcg_gen_xor_tl(temp3, r1_low, r2);
    if (op1 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(temp2, temp2, temp3);
    } else {
        tcg_gen_and_tl(temp2, temp2, temp3);
    }

    (*op2)(temp3, r1_high, r3);
    tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high);
    tcg_gen_xor_tl(temp4, r1_high, r3);
    if (op2 == tcg_gen_add_tl) {
        tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    } else {
        tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4);
    }
    /* combine V0/V1 bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);

    tcg_gen_mov_tl(ret_low, temp);
    tcg_gen_mov_tl(ret_high, temp3);

    tcg_gen_add_tl(temp, ret_low, ret_low);
    tcg_gen_xor_tl(temp, temp, ret_low);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
/* ret = r2 + (r1 * r3); */
static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t1, t2, t1);

    tcg_gen_extrl_i64_i32(ret, t1);
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* t1 < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}

static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_madd32_d(ret, r1, r2, temp);
}
static inline void
gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();

    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the add can overflow */
    tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);
}
static inline void
gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_add_i64(t2, t2, t1);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, t2);
    /* only the add overflows, if t2 < t1 */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void
gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}

static inline void
gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
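/*
 * For the packed 16-bit MAC variants below, "mode" selects which halfwords
 * of r2 and r3 are multiplied (low*low, low*up, up*low, up*up via the
 * GEN_HELPER_LL/LU/UL/UU macros) and n is the optional left shift applied
 * to the products.
 */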
static inline void
gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
           TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_add_tl, tcg_gen_add_tl);
}
static inline void
gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_sub_tl, tcg_gen_add_tl);
}
static inline void
gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);

    gen_add64_d(temp64_2, temp64_3, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
}
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);

static inline void
gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_adds(ret_low, r1_low, temp);
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_adds(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);

static inline void
gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_subs(ret_low, r1_low, temp);
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_adds(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
static inline void
gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);

    gen_helper_add64_ssov(temp64, tcg_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
static inline void
gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_add64_d(temp64_3, temp64_2, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
}
static inline void
gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_add64_ssov(temp64, tcg_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
static inline void
gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
              uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    gen_helper_addr_h(ret, tcg_env, temp64, r1_low, r1_high);
}
static inline void
gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode);
}
static inline void
gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_addsur_h(ret, tcg_env, temp64, temp, temp2);
}
static inline void
gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
               uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    gen_helper_addr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high);
}
static inline void
gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
static inline void
gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_addsur_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
static inline void
gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv t_n = tcg_constant_i32(n);
    gen_helper_maddr_q(ret, tcg_env, r1, r2, r3, t_n);
}

static inline void
gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv t_n = tcg_constant_i32(n);
    gen_helper_maddr_q_ssov(ret, tcg_env, r1, r2, r3, t_n);
}
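/*
 * 32-bit Q-format multiply-accumulate: the product of arg2 and arg3 is
 * shifted left by n, scaled down by up_shift, and then added to arg1.
 */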
static inline void
gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
             uint32_t up_shift)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    tcg_gen_shli_i64(t2, t2, n);

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_sari_i64(t2, t2, up_shift);

    tcg_gen_add_i64(t3, t1, t2);
    tcg_gen_extrl_i64_i32(temp3, t3);
    /* calc v bit */
    tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1. If this is the
       case, we negate the ovf. */
    if (n == 1) {
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
    }
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
    tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, temp3);
}
static inline void
gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_add_d(ret, arg1, temp);
}
static inline void
gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_adds(ret, arg1, temp);
}
static inline void
gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
               TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    gen_add64_d(t3, t1, t2);
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t3);
}
static inline void
gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
                TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);

    gen_helper_add64_ssov(t1, tcg_env, t1, t2);
    tcg_gen_extr_i64_i32(rl, rh, t1);
}
static inline void
gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;

    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    if (n != 0) {
        tcg_gen_shli_i64(t2, t2, 1);
    }
    tcg_gen_add_i64(t4, t1, t2);
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_andc_i64(t3, t3, t2);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1. If this is the
       case, we negate the ovf. */
    if (n == 1) {
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
    }
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void
gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
              uint32_t up_shift)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    tcg_gen_sari_i64(t2, t2, up_shift - n);

    gen_helper_madd32_q_add_ssov(ret, tcg_env, t1, t2);
}
static inline void
gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
              TCGv arg3, uint32_t n)
{
    TCGv_i64 r1 = tcg_temp_new_i64();
    TCGv t_n = tcg_constant_i32(n);

    tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
    gen_helper_madd64_q_ssov(r1, tcg_env, r1, arg2, arg3, t_n);
    tcg_gen_extr_i64_i32(rl, rh, r1);
}
/* ret = r2 - (r1 * r3); */
static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, r1);
    tcg_gen_ext_i32_i64(t2, r2);
    tcg_gen_ext_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t1, t2, t1);

    tcg_gen_extrl_i64_i32(ret, t1);
    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
    /* result < -0x80000000 */
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
    tcg_gen_or_i64(t2, t2, t3);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}

static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_msub32_d(ret, r1, r2, temp);
}
static inline void
gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv t3 = tcg_temp_new();
    TCGv t4 = tcg_temp_new();

    tcg_gen_muls2_tl(t1, t2, r1, r3);
    /* only the sub can overflow */
    tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
    tcg_gen_xor_tl(t1, r2_high, t2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back the result */
    tcg_gen_mov_tl(ret_low, t3);
    tcg_gen_mov_tl(ret_high, t4);
}

static inline void
gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
static inline void
gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t1, r1);
    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
    tcg_gen_extu_i32_i64(t3, r3);

    tcg_gen_mul_i64(t1, t1, t3);
    tcg_gen_sub_i64(t3, t2, t1);
    tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
    /* calc V bit, only the sub can overflow, if t1 > t2 */
    tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}

static inline void
gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
{
    TCGv temp = tcg_constant_i32(r2);
    gen_add_d(ret, r1, temp);
}
/* calculate the carry bit too */
static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, 0);
    /* Addition and set C/V/SV bits */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}

static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_add_CC(ret, r1, temp);
}
static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv carry = tcg_temp_new_i32();
    TCGv t0 = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_movi_tl(t0, 0);
    tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
    /* Addition, carry and set C/V/SV bits */
    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
    tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(t0, r1, r2);
    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}

static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_addc_CC(ret, r1, temp);
}
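/*
 * Conditional add: if (r4 <cond> 0) then r3 = r1 + r2 and the V/SV/AV/SAV
 * flags are updated, otherwise r3 = r1 and the flags are left untouched.
 */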
static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_constant_i32(0);

    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_add_tl(result, r1, r2);
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_andc_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
}

static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
                                 TCGv r3, TCGv r4)
{
    TCGv temp = tcg_constant_i32(r2);
    gen_cond_add(cond, r1, temp, r3, r4);
}
static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}
static inline void
gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv temp = tcg_temp_new();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();

    tcg_gen_sub_i64(result, r1, r2);
    tcg_gen_xor_i64(t1, result, r1);
    tcg_gen_xor_i64(t0, r1, r2);
    tcg_gen_and_i64(t1, t1, t0);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* calc AV/SAV bits */
    tcg_gen_extrh_i64_i32(temp, result);
    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_i64(ret, result);
}
static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv result = tcg_temp_new();
    TCGv temp = tcg_temp_new();

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2);
    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}

static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_not_tl(temp, r2);
    gen_addc_CC(ret, r1, temp);
}
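/*
 * Conditional subtract: if (r4 <cond> 0) then r3 = r1 - r2 and the
 * V/SV/AV/SAV flags are updated, otherwise r3 = r1 and the flags are kept.
 */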
static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                TCGv r4)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv result = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv t0 = tcg_constant_i32(0);

    /* create mask for sticky bits */
    tcg_gen_setcond_tl(cond, mask, r4, t0);
    tcg_gen_shli_tl(mask, mask, 31);

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_xor_tl(temp, result, r1);
    tcg_gen_xor_tl(temp2, r1, r2);
    tcg_gen_and_tl(temp, temp, temp2);
    tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
    tcg_gen_add_tl(temp, result, result);
    tcg_gen_xor_tl(temp, temp, result);
    tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
    /* write back result */
    tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
}
static inline void
gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
           TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_sub_tl, tcg_gen_sub_tl);
}
static inline void
gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_subs(ret_low, r1_low, temp);
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_subs(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
static inline void
gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
            TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_sub64_d(temp64_3, temp64_2, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
}
static inline void
gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
    gen_helper_sub64_ssov(temp64, tcg_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
static inline void
gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
              uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    gen_helper_subr_h(ret, tcg_env, temp64, r1_low, r1_high);
}
static inline void
gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);
}
static inline void
gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
               uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    gen_helper_subr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high);
}
static inline void
gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);
}
static inline void
gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv temp = tcg_constant_i32(n);
    gen_helper_msubr_q(ret, tcg_env, r1, r2, r3, temp);
}

static inline void
gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
    TCGv temp = tcg_constant_i32(n);
    gen_helper_msubr_q_ssov(ret, tcg_env, r1, r2, r3, temp);
}
static inline void
gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
             uint32_t up_shift)
{
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);

    tcg_gen_ext_i32_i64(t1, arg1);
    /* if we shift part of the fraction out, we need to round up */
    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
    tcg_gen_sari_i64(t2, t2, up_shift - n);
    tcg_gen_add_i64(t2, t2, t4);

    tcg_gen_sub_i64(t3, t1, t2);
    tcg_gen_extrl_i64_i32(temp3, t3);
    tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
    tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, temp3);
}
static inline void
gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_sub_d(ret, arg1, temp);
}
static inline void
gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    gen_subs(ret, arg1, temp);
}
static inline void
gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
               TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    gen_sub64_d(t3, t1, t2);
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t3);
}
static inline void
gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
                TCGv arg3, uint32_t n)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    if (n == 0) {
        tcg_gen_mul_tl(temp, arg2, arg3);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(temp, arg2, arg3);
        tcg_gen_shli_tl(temp, temp, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
        tcg_gen_sub_tl(temp, temp, temp2);
    }
    tcg_gen_ext_i32_i64(t2, temp);
    tcg_gen_shli_i64(t2, t2, 16);
    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);

    gen_helper_sub64_ssov(t1, tcg_env, t1, t2);
    tcg_gen_extr_i64_i32(rl, rh, t1);
}
static inline void
gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
             TCGv arg3, uint32_t n)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();
    TCGv temp, temp2;

    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    if (n != 0) {
        tcg_gen_shli_i64(t2, t2, 1);
    }
    tcg_gen_sub_i64(t4, t1, t2);
    tcg_gen_xor_i64(t3, t4, t1);
    tcg_gen_xor_i64(t2, t1, t2);
    tcg_gen_and_i64(t3, t3, t2);
    tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
    /* We produce an overflow on the host if the mul before was
       (0x80000000 * 0x80000000) << 1. If this is the
       case, we negate the ovf. */
    if (n == 1) {
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_shli_tl(temp, temp, 31);
        /* negate v bit, if special condition */
        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
    }
    /* write back result */
    tcg_gen_extr_i64_i32(rl, rh, t4);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    /* Calc AV/SAV bits */
    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void
gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
              uint32_t up_shift)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t1, arg1);
    tcg_gen_ext_i32_i64(t2, arg2);
    tcg_gen_ext_i32_i64(t3, arg3);

    tcg_gen_mul_i64(t2, t2, t3);
    /* if we shift part of the fraction out, we need to round up */
    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
    tcg_gen_sari_i64(t3, t2, up_shift - n);
    tcg_gen_add_i64(t3, t3, t4);

    gen_helper_msub32_q_sub_ssov(ret, tcg_env, t1, t3);
}
static inline void
gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
              TCGv arg3, uint32_t n)
{
    TCGv_i64 r1 = tcg_temp_new_i64();
    TCGv t_n = tcg_constant_i32(n);

    tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
    gen_helper_msub64_q_ssov(r1, tcg_env, r1, arg2, arg3, t_n);
    tcg_gen_extr_i64_i32(rl, rh, r1);
}
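/*
 * Mixed multiply-add/subtract: the low half adds the low product
 * (op1 = add) while the high half subtracts the high product (op2 = sub),
 * see the gen_addsub64_h call below.
 */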
static inline void
gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
             TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
                   tcg_gen_add_tl, tcg_gen_sub_tl);
}
static inline void
gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();
    TCGv_i64 temp64_3 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);

    gen_sub64_d(temp64_2, temp64_3, temp64);
    /* write back result */
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
}
static inline void
gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_subadr_h(ret, tcg_env, temp64, temp, temp2);
}
static inline void
gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
              TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv temp3 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_extr_i64_i32(temp, temp2, temp64);
    gen_adds(ret_low, r1_low, temp);
    tcg_gen_mov_tl(temp, cpu_PSW_V);
    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
    gen_subs(ret_high, r1_high, temp2);
    /* combine v bits */
    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
    /* combine av bits */
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
}
static inline void
gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
               TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv_i64 temp64 = tcg_temp_new_i64();
    TCGv_i64 temp64_2 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
    tcg_gen_ext32s_i64(temp64, temp64); /* low */
    tcg_gen_sub_i64(temp64, temp64_2, temp64);
    tcg_gen_shli_i64(temp64, temp64, 16);
    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);

    gen_helper_sub64_ssov(temp64, tcg_env, temp64_2, temp64);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
static inline void
gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
    TCGv t_n = tcg_constant_i32(n);
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    TCGv_i64 temp64 = tcg_temp_new_i64();

    switch (mode) {
    case MODE_LL:
        GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_LU:
        GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UL:
        GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
        break;
    case MODE_UU:
        GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
        break;
    }
    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
    tcg_gen_shli_tl(temp, r1, 16);
    gen_helper_subadr_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
static inline void gen_abs(TCGv ret, TCGv r1)
{
    tcg_gen_abs_tl(ret, r1);
    /* overflow can only happen, if r1 = 0x80000000 */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new_i32();
    TCGv result = tcg_temp_new_i32();

    tcg_gen_sub_tl(result, r1, r2);
    tcg_gen_sub_tl(temp, r2, r1);
    tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);

    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
    tcg_gen_xor_tl(temp, result, r2);
    tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
    tcg_gen_xor_tl(temp, r1, r2);
    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, result, result);
    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* write back result */
    tcg_gen_mov_tl(ret, result);
}

static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_absdif(ret, r1, temp);
}

static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_absdif_ssov(ret, tcg_env, r1, temp);
}
static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
{
    TCGv high = tcg_temp_new();
    TCGv low = tcg_temp_new();

    tcg_gen_muls2_tl(low, high, r1, r2);
    tcg_gen_mov_tl(ret, low);
    tcg_gen_sari_tl(low, low, 31);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}

static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_mul_i32s(ret, r1, temp);
}
static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}

static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
                                 int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_mul_i64s(ret_low, ret_high, r1, temp);
}

static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
{
    tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}

static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
                                 int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_mul_i64u(ret_low, ret_high, r1, temp);
}
static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_mul_ssov(ret, tcg_env, r1, temp);
}

static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_mul_suov(ret, tcg_env, r1, temp);
}

/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_madd32_ssov(ret, tcg_env, r1, r2, temp);
}

static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_madd32_suov(ret, tcg_env, r1, r2, temp);
}
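/*
 * Note for the Q-format multiply helpers below: with n == 1 the 64-bit
 * product is doubled (shifted left by one) before use, with n == 0 it is
 * used as-is; up_shift selects how far the product is shifted down before
 * the result is written back.
 */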
static inline void
gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
{
    TCGv_i64 temp_64 = tcg_temp_new_i64();
    TCGv_i64 temp2_64 = tcg_temp_new_i64();

    if (n == 0) {
        if (up_shift == 32) {
            tcg_gen_muls2_tl(rh, rl, arg1, arg2);
        } else if (up_shift == 16) {
            tcg_gen_ext_i32_i64(temp_64, arg1);
            tcg_gen_ext_i32_i64(temp2_64, arg2);

            tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
            tcg_gen_shri_i64(temp_64, temp_64, up_shift);
            tcg_gen_extr_i64_i32(rl, rh, temp_64);
        } else {
            tcg_gen_muls2_tl(rl, rh, arg1, arg2);
        }
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else { /* n is expected to be 1 */
        tcg_gen_ext_i32_i64(temp_64, arg1);
        tcg_gen_ext_i32_i64(temp2_64, arg2);

        tcg_gen_mul_i64(temp_64, temp_64, temp2_64);

        if (up_shift == 0) {
            tcg_gen_shli_i64(temp_64, temp_64, 1);
        } else {
            tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1);
        }
        tcg_gen_extr_i64_i32(rl, rh, temp_64);
        /* overflow only occurs if r1 = r2 = 0x8000 */
        if (up_shift == 0) {/* result is 64 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
                                0x80000000);
        } else { /* result is 32 bit */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
                                0x80000000);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* calc sv overflow bit */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
    }
    /* calc av overflow bit */
    if (up_shift == 0) {
        tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
        tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
    } else {
        tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
        tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
    }
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static void
gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();

    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
    } else { /* n is expected to be 1 */
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
{
    TCGv temp = tcg_temp_new();

    if (n == 0) {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_addi_tl(ret, ret, 0x8000);
    } else {
        tcg_gen_mul_tl(ret, arg1, arg2);
        tcg_gen_shli_tl(ret, ret, 1);
        tcg_gen_addi_tl(ret, ret, 0x8000);
        /* catch special case r1 = r2 = 0x8000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
        tcg_gen_muli_tl(temp, temp, 0x8001);
        tcg_gen_sub_tl(ret, ret, temp);
    }
    tcg_gen_movi_tl(cpu_PSW_V, 0);
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    /* cut halfword off */
    tcg_gen_andi_tl(ret, ret, 0xffff0000);
}
static inline void
gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_madd64_ssov(temp64, tcg_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}

static inline void
gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}

static inline void
gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_madd64_suov(temp64, tcg_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}

static inline void
gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_msub32_ssov(ret, tcg_env, r1, r2, temp);
}

static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_msub32_suov(ret, tcg_env, r1, r2, temp);
}

static inline void
gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
             TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_msub64_ssov(temp64, tcg_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}

static inline void
gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}

static inline void
gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
              TCGv r3)
{
    TCGv_i64 temp64 = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
    gen_helper_msub64_suov(temp64, tcg_env, r1, temp64, r3);
    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}

static inline void
gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
               int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
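/* saturation helpers: clamp arg to [low, up] (signed) or [0, up] (unsigned) */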
static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
{
    tcg_gen_smax_tl(ret, arg, tcg_constant_i32(low));
    tcg_gen_smin_tl(ret, ret, tcg_constant_i32(up));
}

static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
{
    tcg_gen_umin_tl(ret, arg, tcg_constant_i32(up));
}

static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
{
    if (shift_count == -32) {
        tcg_gen_movi_tl(ret, 0);
    } else if (shift_count >= 0) {
        tcg_gen_shli_tl(ret, r1, shift_count);
    } else {
        tcg_gen_shri_tl(ret, r1, -shift_count);
    }
}

static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
{
    TCGv temp_low, temp_high;

    if (shiftcount == -16) {
        tcg_gen_movi_tl(ret, 0);
    } else {
        temp_high = tcg_temp_new();
        temp_low = tcg_temp_new();

        tcg_gen_andi_tl(temp_low, r1, 0xffff);
        tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
        gen_shi(temp_low, temp_low, shiftcount);
        gen_shi(ret, temp_high, shiftcount);
        tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);
    }
}
static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
{
    uint32_t msk, msk_start;
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    if (shift_count == 0) {
        /* Clear PSW.C and PSW.V */
        tcg_gen_movi_tl(cpu_PSW_C, 0);
        tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count == -32) {
        tcg_gen_mov_tl(cpu_PSW_C, r1);
        /* fill ret completely with sign bit */
        tcg_gen_sari_tl(ret, r1, 31);
        tcg_gen_movi_tl(cpu_PSW_V, 0);
    } else if (shift_count > 0) {
        TCGv t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count);
        TCGv t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count);

        msk_start = 32 - shift_count;
        msk = ((1 << shift_count) - 1) << msk_start;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        /* calc v/sv bits */
        tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
        tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
        tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
        tcg_gen_shli_tl(ret, r1, shift_count);
    } else {
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        msk = (1 << -shift_count) - 1;
        tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
        tcg_gen_sari_tl(ret, r1, -shift_count);
    }
    /* calc av overflow bit */
    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
    /* calc sav overflow bit */
    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
}
static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sha_ssov(ret, tcg_env, r1, r2);
}

static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_shas(ret, r1, temp);
}

static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
{
    TCGv low, high;

    if (shift_count == 0) {
        tcg_gen_mov_tl(ret, r1);
    } else if (shift_count > 0) {
        low = tcg_temp_new();
        high = tcg_temp_new();

        tcg_gen_andi_tl(high, r1, 0xffff0000);
        tcg_gen_shli_tl(low, r1, shift_count);
        tcg_gen_shli_tl(ret, high, shift_count);
        tcg_gen_deposit_tl(ret, ret, low, 0, 16);
    } else {
        low = tcg_temp_new();
        high = tcg_temp_new();

        tcg_gen_ext16s_tl(low, r1);
        tcg_gen_sari_tl(low, low, -shift_count);
        tcg_gen_sari_tl(ret, r1, -shift_count);
        tcg_gen_deposit_tl(ret, ret, low, 0, 16);
    }
}
/* ret = {ret[30:0], (r1 cond r2)}; */
static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_shli_tl(temp, ret, 1);
    tcg_gen_setcond_tl(cond, temp2, r1, r2);
    tcg_gen_or_tl(ret, temp, temp2);
}

static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_sh_cond(cond, ret, r1, temp);
}
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_add_ssov(ret, tcg_env, r1, r2);
}

static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_add_ssov(ret, tcg_env, r1, temp);
}

static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
{
    TCGv temp = tcg_constant_i32(con);
    gen_helper_add_suov(ret, tcg_env, r1, temp);
}

static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_ssov(ret, tcg_env, r1, r2);
}

static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
{
    gen_helper_sub_suov(ret, tcg_env, r1, r2);
}
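/*
 * gen_bit_2op below computes ret[0] = ret[0] op2 (r1[pos1] op1 r2[pos2]);
 * bits ret[31:1] are left unchanged.
 */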
static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv),
                               void(*op2)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(temp1, temp1, temp2);
    (*op2)(temp1, ret, temp1);

    tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);
}

/* ret = r1[pos1] op1 r2[pos2]; */
static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
                               int pos1, int pos2,
                               void(*op1)(TCGv, TCGv, TCGv))
{
    TCGv temp1, temp2;

    temp1 = tcg_temp_new();
    temp2 = tcg_temp_new();

    tcg_gen_shri_tl(temp2, r2, pos2);
    tcg_gen_shri_tl(temp1, r1, pos1);

    (*op1)(ret, temp1, temp2);

    tcg_gen_andi_tl(ret, ret, 0x1);
}
static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
                                         void(*op)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();
    /* temp = (arg1 cond arg2) */
    tcg_gen_setcond_tl(cond, temp, r1, r2);
    tcg_gen_andi_tl(temp2, ret, 0x1);
    /* temp = temp insn temp2 */
    (*op)(temp, temp, temp2);
    /* ret = {ret[31:1], temp} */
    tcg_gen_deposit_tl(ret, ret, temp, 0, 1);
}

static inline void
gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
                       void(*op)(TCGv, TCGv, TCGv))
{
    TCGv temp = tcg_constant_i32(con);
    gen_accumulating_cond(cond, ret, r1, temp, op);
}
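/*
 * gen_eqany_bi/gen_eqany_hi below set ret to 1 if any byte (resp. halfword)
 * of r1 equals the corresponding byte (halfword) of con, and to 0 otherwise.
 */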
static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv b0 = tcg_temp_new();
    TCGv b1 = tcg_temp_new();
    TCGv b2 = tcg_temp_new();
    TCGv b3 = tcg_temp_new();

    tcg_gen_andi_tl(b0, r1, 0xff);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);

    tcg_gen_andi_tl(b1, r1, 0xff00);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);

    tcg_gen_andi_tl(b2, r1, 0xff0000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);

    tcg_gen_andi_tl(b3, r1, 0xff000000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);

    tcg_gen_or_tl(ret, b0, b1);
    tcg_gen_or_tl(ret, ret, b2);
    tcg_gen_or_tl(ret, ret, b3);
}

static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
{
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();

    tcg_gen_andi_tl(h0, r1, 0xffff);
    tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);

    tcg_gen_andi_tl(h1, r1, 0xffff0000);
    tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);

    tcg_gen_or_tl(ret, h0, h1);
}
/* mask = ((1 << width) - 1) << pos;
   ret = (r1 & ~mask) | ((r2 << pos) & mask); */
static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
{
    TCGv mask = tcg_temp_new();
    TCGv temp = tcg_temp_new();
    TCGv temp2 = tcg_temp_new();

    tcg_gen_movi_tl(mask, 1);
    tcg_gen_shl_tl(mask, mask, width);
    tcg_gen_subi_tl(mask, mask, 1);
    tcg_gen_shl_tl(mask, mask, pos);

    tcg_gen_shl_tl(temp, r2, pos);
    tcg_gen_and_tl(temp, temp, mask);
    tcg_gen_andc_tl(temp2, r1, mask);
    tcg_gen_or_tl(ret, temp, temp2);
}
static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    gen_helper_bsplit(temp, r1);
    tcg_gen_extr_i64_i32(rl, rh, temp);
}

static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
{
    TCGv_i64 temp = tcg_temp_new_i64();

    gen_helper_unpack(temp, r1);
    tcg_gen_extr_i64_i32(rl, rh, temp);
}
static inline void
gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
{
    TCGv_i64 ret = tcg_temp_new_i64();

    if (!has_feature(ctx, TRICORE_FEATURE_131)) {
        gen_helper_dvinit_b_13(ret, tcg_env, r1, r2);
    } else {
        gen_helper_dvinit_b_131(ret, tcg_env, r1, r2);
    }
    tcg_gen_extr_i64_i32(rl, rh, ret);
}

static inline void
gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
{
    TCGv_i64 ret = tcg_temp_new_i64();

    if (!has_feature(ctx, TRICORE_FEATURE_131)) {
        gen_helper_dvinit_h_13(ret, tcg_env, r1, r2);
    } else {
        gen_helper_dvinit_h_131(ret, tcg_env, r1, r2);
    }
    tcg_gen_extr_i64_i32(rl, rh, ret);
}
static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_add_tl(temp, arg_low, arg_low);
    tcg_gen_xor_tl(temp, temp, arg_low);
    tcg_gen_add_tl(cpu_PSW_AV, arg_high, arg_high);
    tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, arg_high);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);

    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
    tcg_gen_movi_tl(cpu_PSW_V, 0);
}

static void gen_calc_usb_mulr_h(TCGv arg)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_add_tl(temp, arg, arg);
    tcg_gen_xor_tl(temp, temp, arg);
    tcg_gen_shli_tl(cpu_PSW_AV, temp, 16);
    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);

    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);

    tcg_gen_movi_tl(cpu_PSW_V, 0);
}
/* helpers for generating program flow micro-ops */

static inline void gen_save_pc(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_PC, pc);
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&ctx->base, dest)) {
        tcg_gen_goto_tb(n);
        gen_save_pc(dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        gen_save_pc(dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void generate_trap(DisasContext *ctx, int class, int tin)
{
    TCGv_i32 classtemp = tcg_constant_i32(class);
    TCGv_i32 tintemp = tcg_constant_i32(tin);

    gen_save_pc(ctx->base.pc_next);
    gen_helper_raise_exception_sync(tcg_env, classtemp, tintemp);
    ctx->base.is_jmp = DISAS_NORETURN;
}
static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
                                   TCGv r2, int16_t address)
{
    TCGLabel *jumpLabel = gen_new_label();
    tcg_gen_brcond_tl(cond, r1, r2, jumpLabel);

    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    gen_set_label(jumpLabel);
    gen_goto_tb(ctx, 0, ctx->base.pc_next + address * 2);
}

static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
                                    int r2, int16_t address)
{
    TCGv temp = tcg_constant_i32(r2);
    gen_branch_cond(ctx, cond, r1, temp, address);
}

static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + offset);
    gen_set_label(l1);
    gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
}

static void gen_fcall_save_ctx(DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_addi_tl(temp, cpu_gpr_a[10], -4);
    tcg_gen_qemu_st_tl(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL);
    tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
    tcg_gen_mov_tl(cpu_gpr_a[10], temp);
}

static void gen_fret(DisasContext *ctx)
{
    TCGv temp = tcg_temp_new();

    tcg_gen_andi_tl(temp, cpu_gpr_a[11], ~0x1);
    tcg_gen_qemu_ld_tl(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL);
    tcg_gen_addi_tl(cpu_gpr_a[10], cpu_gpr_a[10], 4);
    tcg_gen_mov_tl(cpu_PC, temp);
    ctx->base.is_jmp = DISAS_EXIT;
}
2899 static void gen_compute_branch(DisasContext
*ctx
, uint32_t opc
, int r1
,
2900 int r2
, int32_t constant
, int32_t offset
)
2906 /* SB-format jumps */
2909 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2911 case OPC1_32_B_CALL
:
2912 case OPC1_16_SB_CALL
:
2913 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
2914 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
2917 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], 0, offset
);
2919 case OPC1_16_SB_JNZ
:
2920 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], 0, offset
);
2922 /* SBC-format jumps */
2923 case OPC1_16_SBC_JEQ
:
2924 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
, offset
);
2926 case OPC1_16_SBC_JEQ2
:
2927 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[15], constant
,
2930 case OPC1_16_SBC_JNE
:
2931 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15], constant
, offset
);
2933 case OPC1_16_SBC_JNE2
:
2934 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[15],
2935 constant
, offset
+ 16);
2937 /* SBRN-format jumps */
2938 case OPC1_16_SBRN_JZ_T
:
2939 temp
= tcg_temp_new();
2940 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2941 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
2943 case OPC1_16_SBRN_JNZ_T
:
2944 temp
= tcg_temp_new();
2945 tcg_gen_andi_tl(temp
, cpu_gpr_d
[15], 0x1u
<< constant
);
2946 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
2948 /* SBR-format jumps */
2949 case OPC1_16_SBR_JEQ
:
2950 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2953 case OPC1_16_SBR_JEQ2
:
2954 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2957 case OPC1_16_SBR_JNE
:
2958 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2961 case OPC1_16_SBR_JNE2
:
2962 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15],
2965 case OPC1_16_SBR_JNZ
:
2966 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], 0, offset
);
2968 case OPC1_16_SBR_JNZ_A
:
2969 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
2971 case OPC1_16_SBR_JGEZ
:
2972 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], 0, offset
);
2974 case OPC1_16_SBR_JGTZ
:
2975 gen_branch_condi(ctx
, TCG_COND_GT
, cpu_gpr_d
[r1
], 0, offset
);
2977 case OPC1_16_SBR_JLEZ
:
2978 gen_branch_condi(ctx
, TCG_COND_LE
, cpu_gpr_d
[r1
], 0, offset
);
2980 case OPC1_16_SBR_JLTZ
:
2981 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], 0, offset
);
2983 case OPC1_16_SBR_JZ
:
2984 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], 0, offset
);
2986 case OPC1_16_SBR_JZ_A
:
2987 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
2989 case OPC1_16_SBR_LOOP
:
2990 gen_loop(ctx
, r1
, offset
* 2 - 32);
2992 /* SR-format jumps */
2994 tcg_gen_andi_tl(cpu_PC
, cpu_gpr_a
[r1
], 0xfffffffe);
2995 ctx
->base
.is_jmp
= DISAS_EXIT
;
2997 case OPC2_32_SYS_RET
:
2998 case OPC2_16_SR_RET
:
2999 gen_helper_ret(tcg_env
);
3000 ctx
->base
.is_jmp
= DISAS_EXIT
;
3003 case OPC1_32_B_CALLA
:
3004 gen_helper_1arg(call
, ctx
->pc_succ_insn
);
3005 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3007 case OPC1_32_B_FCALL
:
3008 gen_fcall_save_ctx(ctx
);
3009 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3011 case OPC1_32_B_FCALLA
:
3012 gen_fcall_save_ctx(ctx
);
3013 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3016 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3019 gen_goto_tb(ctx
, 0, EA_B_ABSOLUT(offset
));
3022 tcg_gen_movi_tl(cpu_gpr_a
[11], ctx
->pc_succ_insn
);
3023 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3026 case OPCM_32_BRC_EQ_NEQ
:
3027 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JEQ
) {
3028 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], constant
, offset
);
3030 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], constant
, offset
);
3033 case OPCM_32_BRC_GE
:
3034 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OP2_32_BRC_JGE
) {
3035 gen_branch_condi(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], constant
, offset
);
3037 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3038 gen_branch_condi(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], constant
,
3042 case OPCM_32_BRC_JLT
:
3043 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JLT
) {
3044 gen_branch_condi(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], constant
, offset
);
3046 constant
= MASK_OP_BRC_CONST4(ctx
->opcode
);
3047 gen_branch_condi(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], constant
,
3051 case OPCM_32_BRC_JNE
:
3052 temp
= tcg_temp_new();
3053 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRC_JNED
) {
3054 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3055 /* subi is unconditional */
3056 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3057 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3059 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3060 /* addi is unconditional */
3061 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3062 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, constant
, offset
);
3066 case OPCM_32_BRN_JTT
:
3067 n
= MASK_OP_BRN_N(ctx
->opcode
);
3069 temp
= tcg_temp_new();
3070 tcg_gen_andi_tl(temp
, cpu_gpr_d
[r1
], (1 << n
));
3072 if (MASK_OP_BRN_OP2(ctx
->opcode
) == OPC2_32_BRN_JNZ_T
) {
3073 gen_branch_condi(ctx
, TCG_COND_NE
, temp
, 0, offset
);
3075 gen_branch_condi(ctx
, TCG_COND_EQ
, temp
, 0, offset
);
3079 case OPCM_32_BRR_EQ_NEQ
:
3080 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ
) {
3081 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3084 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3088 case OPCM_32_BRR_ADDR_EQ_NEQ
:
3089 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JEQ_A
) {
3090 gen_branch_cond(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3093 gen_branch_cond(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
3097 case OPCM_32_BRR_GE
:
3098 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JGE
) {
3099 gen_branch_cond(ctx
, TCG_COND_GE
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3102 gen_branch_cond(ctx
, TCG_COND_GEU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3106 case OPCM_32_BRR_JLT
:
3107 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JLT
) {
3108 gen_branch_cond(ctx
, TCG_COND_LT
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3111 gen_branch_cond(ctx
, TCG_COND_LTU
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3115 case OPCM_32_BRR_LOOP
:
3116 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_LOOP
) {
3117 gen_loop(ctx
, r2
, offset
* 2);
3119 /* OPC2_32_BRR_LOOPU */
3120 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
+ offset
* 2);
3123 case OPCM_32_BRR_JNE
:
3124 temp
= tcg_temp_new();
3125 temp2
= tcg_temp_new();
3126 if (MASK_OP_BRC_OP2(ctx
->opcode
) == OPC2_32_BRR_JNED
) {
3127 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3128 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3129 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3130 /* subi is unconditional */
3131 tcg_gen_subi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3132 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3134 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
3135 /* also save r2, in case of r1 == r2, so r2 is not decremented */
3136 tcg_gen_mov_tl(temp2
, cpu_gpr_d
[r2
]);
3137 /* addi is unconditional */
3138 tcg_gen_addi_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 1);
3139 gen_branch_cond(ctx
, TCG_COND_NE
, temp
, temp2
, offset
);
3142 case OPCM_32_BRR_JNZ
:
3143 if (MASK_OP_BRR_OP2(ctx
->opcode
) == OPC2_32_BRR_JNZ_A
) {
3144 gen_branch_condi(ctx
, TCG_COND_NE
, cpu_gpr_a
[r1
], 0, offset
);
3146 gen_branch_condi(ctx
, TCG_COND_EQ
, cpu_gpr_a
[r1
], 0, offset
);
3150 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3156 * Functions for decoding instructions
3159 static void decode_src_opc(DisasContext
*ctx
, int op1
)
3165 r1
= MASK_OP_SRC_S1D(ctx
->opcode
);
3166 const4
= MASK_OP_SRC_CONST4_SEXT(ctx
->opcode
);
3169 case OPC1_16_SRC_ADD
:
3170 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3172 case OPC1_16_SRC_ADD_A15
:
3173 gen_addi_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], const4
);
3175 case OPC1_16_SRC_ADD_15A
:
3176 gen_addi_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], const4
);
3178 case OPC1_16_SRC_ADD_A
:
3179 tcg_gen_addi_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], const4
);
3181 case OPC1_16_SRC_CADD
:
3182 gen_condi_add(TCG_COND_NE
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3185 case OPC1_16_SRC_CADDN
:
3186 gen_condi_add(TCG_COND_EQ
, cpu_gpr_d
[r1
], const4
, cpu_gpr_d
[r1
],
3189 case OPC1_16_SRC_CMOV
:
3190 temp
= tcg_constant_tl(0);
3191 temp2
= tcg_constant_tl(const4
);
3192 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3193 temp2
, cpu_gpr_d
[r1
]);
3195 case OPC1_16_SRC_CMOVN
:
3196 temp
= tcg_constant_tl(0);
3197 temp2
= tcg_constant_tl(const4
);
3198 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3199 temp2
, cpu_gpr_d
[r1
]);
3201 case OPC1_16_SRC_EQ
:
3202 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3205 case OPC1_16_SRC_LT
:
3206 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3209 case OPC1_16_SRC_MOV
:
3210 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3212 case OPC1_16_SRC_MOV_A
:
3213 const4
= MASK_OP_SRC_CONST4(ctx
->opcode
);
3214 tcg_gen_movi_tl(cpu_gpr_a
[r1
], const4
);
3216 case OPC1_16_SRC_MOV_E
:
3217 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3219 tcg_gen_movi_tl(cpu_gpr_d
[r1
], const4
);
3220 tcg_gen_sari_tl(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], 31);
3222 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3225 case OPC1_16_SRC_SH
:
3226 gen_shi(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3228 case OPC1_16_SRC_SHA
:
3229 gen_shaci(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], const4
);
3232 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3236 static void decode_srr_opc(DisasContext
*ctx
, int op1
)
3241 r1
= MASK_OP_SRR_S1D(ctx
->opcode
);
3242 r2
= MASK_OP_SRR_S2(ctx
->opcode
);
3245 case OPC1_16_SRR_ADD
:
3246 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3248 case OPC1_16_SRR_ADD_A15
:
3249 gen_add_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3251 case OPC1_16_SRR_ADD_15A
:
3252 gen_add_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3254 case OPC1_16_SRR_ADD_A
:
3255 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3257 case OPC1_16_SRR_ADDS
:
3258 gen_adds(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3260 case OPC1_16_SRR_AND
:
3261 tcg_gen_and_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3263 case OPC1_16_SRR_CMOV
:
3264 temp
= tcg_constant_tl(0);
3265 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3266 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3268 case OPC1_16_SRR_CMOVN
:
3269 temp
= tcg_constant_tl(0);
3270 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r1
], cpu_gpr_d
[15], temp
,
3271 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
]);
3273 case OPC1_16_SRR_EQ
:
3274 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3277 case OPC1_16_SRR_LT
:
3278 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[15], cpu_gpr_d
[r1
],
3281 case OPC1_16_SRR_MOV
:
3282 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3284 case OPC1_16_SRR_MOV_A
:
3285 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_d
[r2
]);
3287 case OPC1_16_SRR_MOV_AA
:
3288 tcg_gen_mov_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
]);
3290 case OPC1_16_SRR_MOV_D
:
3291 tcg_gen_mov_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
]);
3293 case OPC1_16_SRR_MUL
:
3294 gen_mul_i32s(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3296 case OPC1_16_SRR_OR
:
3297 tcg_gen_or_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3299 case OPC1_16_SRR_SUB
:
3300 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3302 case OPC1_16_SRR_SUB_A15B
:
3303 gen_sub_d(cpu_gpr_d
[r1
], cpu_gpr_d
[15], cpu_gpr_d
[r2
]);
3305 case OPC1_16_SRR_SUB_15AB
:
3306 gen_sub_d(cpu_gpr_d
[15], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3308 case OPC1_16_SRR_SUBS
:
3309 gen_subs(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3311 case OPC1_16_SRR_XOR
:
3312 tcg_gen_xor_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
3315 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3319 static void decode_ssr_opc(DisasContext
*ctx
, int op1
)
3323 r1
= MASK_OP_SSR_S1(ctx
->opcode
);
3324 r2
= MASK_OP_SSR_S2(ctx
->opcode
);
3327 case OPC1_16_SSR_ST_A
:
3328 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3330 case OPC1_16_SSR_ST_A_POSTINC
:
3331 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3332 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3334 case OPC1_16_SSR_ST_B
:
3335 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3337 case OPC1_16_SSR_ST_B_POSTINC
:
3338 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3339 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3341 case OPC1_16_SSR_ST_H
:
3342 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3344 case OPC1_16_SSR_ST_H_POSTINC
:
3345 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUW
);
3346 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3348 case OPC1_16_SSR_ST_W
:
3349 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3351 case OPC1_16_SSR_ST_W_POSTINC
:
3352 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LEUL
);
3353 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3356 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3360 static void decode_sc_opc(DisasContext
*ctx
, int op1
)
3364 const16
= MASK_OP_SC_CONST8(ctx
->opcode
);
3367 case OPC1_16_SC_AND
:
3368 tcg_gen_andi_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3370 case OPC1_16_SC_BISR
:
3371 if (ctx
->priv
== TRICORE_PRIV_SM
) {
3372 gen_helper_1arg(bisr
, const16
& 0xff);
3374 generate_trap(ctx
, TRAPC_PROT
, TIN1_PRIV
);
3377 case OPC1_16_SC_LD_A
:
3378 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3380 case OPC1_16_SC_LD_W
:
3381 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3383 case OPC1_16_SC_MOV
:
3384 tcg_gen_movi_tl(cpu_gpr_d
[15], const16
);
3387 tcg_gen_ori_tl(cpu_gpr_d
[15], cpu_gpr_d
[15], const16
);
3389 case OPC1_16_SC_ST_A
:
3390 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3392 case OPC1_16_SC_ST_W
:
3393 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[10], const16
* 4, MO_LESL
);
3395 case OPC1_16_SC_SUB_A
:
3396 tcg_gen_subi_tl(cpu_gpr_a
[10], cpu_gpr_a
[10], const16
);
3399 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3403 static void decode_slr_opc(DisasContext
*ctx
, int op1
)
3407 r1
= MASK_OP_SLR_D(ctx
->opcode
);
3408 r2
= MASK_OP_SLR_S2(ctx
->opcode
);
3412 case OPC1_16_SLR_LD_A
:
3413 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3415 case OPC1_16_SLR_LD_A_POSTINC
:
3416 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3417 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3419 case OPC1_16_SLR_LD_BU
:
3420 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3422 case OPC1_16_SLR_LD_BU_POSTINC
:
3423 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_UB
);
3424 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 1);
3426 case OPC1_16_SLR_LD_H
:
3427 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3429 case OPC1_16_SLR_LD_H_POSTINC
:
3430 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESW
);
3431 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 2);
3433 case OPC1_16_SLR_LD_W
:
3434 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3436 case OPC1_16_SLR_LD_W_POSTINC
:
3437 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
, MO_LESL
);
3438 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], 4);
3441 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3445 static void decode_sro_opc(DisasContext
*ctx
, int op1
)
3450 r2
= MASK_OP_SRO_S2(ctx
->opcode
);
3451 address
= MASK_OP_SRO_OFF4(ctx
->opcode
);
3455 case OPC1_16_SRO_LD_A
:
3456 gen_offset_ld(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3458 case OPC1_16_SRO_LD_BU
:
3459 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3461 case OPC1_16_SRO_LD_H
:
3462 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3464 case OPC1_16_SRO_LD_W
:
3465 gen_offset_ld(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3467 case OPC1_16_SRO_ST_A
:
3468 gen_offset_st(ctx
, cpu_gpr_a
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3470 case OPC1_16_SRO_ST_B
:
3471 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
, MO_UB
);
3473 case OPC1_16_SRO_ST_H
:
3474 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 2, MO_LESW
);
3476 case OPC1_16_SRO_ST_W
:
3477 gen_offset_st(ctx
, cpu_gpr_d
[15], cpu_gpr_a
[r2
], address
* 4, MO_LESL
);
3480 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
static void decode_sr_system(DisasContext *ctx)
{
    uint32_t op2;
    op2 = MASK_OP_SR_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_16_SR_NOP:
        break;
    case OPC2_16_SR_RET:
        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
        break;
    case OPC2_16_SR_RFE:
        gen_helper_rfe(tcg_env);
        ctx->base.is_jmp = DISAS_EXIT;
        break;
    case OPC2_16_SR_DEBUG:
        /* raise EXCP_DEBUG */
        break;
    case OPC2_16_SR_FRET:
        gen_fret(ctx);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}

static void decode_sr_accu(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1;

    r1 = MASK_OP_SR_S1D(ctx->opcode);
    op2 = MASK_OP_SR_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_16_SR_RSUB:
        /* calc V bit -- overflow only if r1 = -0x80000000 */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], -0x80000000);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
        tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
        tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
        tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
        break;
    case OPC2_16_SR_SAT_B:
        gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80);
        break;
    case OPC2_16_SR_SAT_BU:
        gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xff);
        break;
    case OPC2_16_SR_SAT_H:
        gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7fff, -0x8000);
        break;
    case OPC2_16_SR_SAT_HU:
        gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xffff);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
3550 static void decode_16Bit_opc(DisasContext
*ctx
)
3558 op1
= MASK_OP_MAJOR(ctx
->opcode
);
3560 /* handle ADDSC.A opcode only being 6 bit long */
3561 if (unlikely((op1
& 0x3f) == OPC1_16_SRRS_ADDSC_A
)) {
3562 op1
= OPC1_16_SRRS_ADDSC_A
;
3566 case OPC1_16_SRC_ADD
:
3567 case OPC1_16_SRC_ADD_A15
:
3568 case OPC1_16_SRC_ADD_15A
:
3569 case OPC1_16_SRC_ADD_A
:
3570 case OPC1_16_SRC_CADD
:
3571 case OPC1_16_SRC_CADDN
:
3572 case OPC1_16_SRC_CMOV
:
3573 case OPC1_16_SRC_CMOVN
:
3574 case OPC1_16_SRC_EQ
:
3575 case OPC1_16_SRC_LT
:
3576 case OPC1_16_SRC_MOV
:
3577 case OPC1_16_SRC_MOV_A
:
3578 case OPC1_16_SRC_MOV_E
:
3579 case OPC1_16_SRC_SH
:
3580 case OPC1_16_SRC_SHA
:
3581 decode_src_opc(ctx
, op1
);
3584 case OPC1_16_SRR_ADD
:
3585 case OPC1_16_SRR_ADD_A15
:
3586 case OPC1_16_SRR_ADD_15A
:
3587 case OPC1_16_SRR_ADD_A
:
3588 case OPC1_16_SRR_ADDS
:
3589 case OPC1_16_SRR_AND
:
3590 case OPC1_16_SRR_CMOV
:
3591 case OPC1_16_SRR_CMOVN
:
3592 case OPC1_16_SRR_EQ
:
3593 case OPC1_16_SRR_LT
:
3594 case OPC1_16_SRR_MOV
:
3595 case OPC1_16_SRR_MOV_A
:
3596 case OPC1_16_SRR_MOV_AA
:
3597 case OPC1_16_SRR_MOV_D
:
3598 case OPC1_16_SRR_MUL
:
3599 case OPC1_16_SRR_OR
:
3600 case OPC1_16_SRR_SUB
:
3601 case OPC1_16_SRR_SUB_A15B
:
3602 case OPC1_16_SRR_SUB_15AB
:
3603 case OPC1_16_SRR_SUBS
:
3604 case OPC1_16_SRR_XOR
:
3605 decode_srr_opc(ctx
, op1
);
3608 case OPC1_16_SSR_ST_A
:
3609 case OPC1_16_SSR_ST_A_POSTINC
:
3610 case OPC1_16_SSR_ST_B
:
3611 case OPC1_16_SSR_ST_B_POSTINC
:
3612 case OPC1_16_SSR_ST_H
:
3613 case OPC1_16_SSR_ST_H_POSTINC
:
3614 case OPC1_16_SSR_ST_W
:
3615 case OPC1_16_SSR_ST_W_POSTINC
:
3616 decode_ssr_opc(ctx
, op1
);
3619 case OPC1_16_SRRS_ADDSC_A
:
3620 r2
= MASK_OP_SRRS_S2(ctx
->opcode
);
3621 r1
= MASK_OP_SRRS_S1D(ctx
->opcode
);
3622 const16
= MASK_OP_SRRS_N(ctx
->opcode
);
3623 temp
= tcg_temp_new();
3624 tcg_gen_shli_tl(temp
, cpu_gpr_d
[15], const16
);
3625 tcg_gen_add_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], temp
);
3628 case OPC1_16_SLRO_LD_A
:
3629 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3630 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3631 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3633 case OPC1_16_SLRO_LD_BU
:
3634 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3635 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3636 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3638 case OPC1_16_SLRO_LD_H
:
3639 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3640 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3641 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3643 case OPC1_16_SLRO_LD_W
:
3644 r1
= MASK_OP_SLRO_D(ctx
->opcode
);
3645 const16
= MASK_OP_SLRO_OFF4(ctx
->opcode
);
3646 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3649 case OPC1_16_SB_CALL
:
3651 case OPC1_16_SB_JNZ
:
3653 address
= MASK_OP_SB_DISP8_SEXT(ctx
->opcode
);
3654 gen_compute_branch(ctx
, op1
, 0, 0, 0, address
);
3657 case OPC1_16_SBC_JEQ
:
3658 case OPC1_16_SBC_JNE
:
3659 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3660 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3661 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3663 case OPC1_16_SBC_JEQ2
:
3664 case OPC1_16_SBC_JNE2
:
3665 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3666 address
= MASK_OP_SBC_DISP4(ctx
->opcode
);
3667 const16
= MASK_OP_SBC_CONST4_SEXT(ctx
->opcode
);
3668 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3670 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3674 case OPC1_16_SBRN_JNZ_T
:
3675 case OPC1_16_SBRN_JZ_T
:
3676 address
= MASK_OP_SBRN_DISP4(ctx
->opcode
);
3677 const16
= MASK_OP_SBRN_N(ctx
->opcode
);
3678 gen_compute_branch(ctx
, op1
, 0, 0, const16
, address
);
3681 case OPC1_16_SBR_JEQ2
:
3682 case OPC1_16_SBR_JNE2
:
3683 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
3684 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3685 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3686 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3688 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3691 case OPC1_16_SBR_JEQ
:
3692 case OPC1_16_SBR_JGEZ
:
3693 case OPC1_16_SBR_JGTZ
:
3694 case OPC1_16_SBR_JLEZ
:
3695 case OPC1_16_SBR_JLTZ
:
3696 case OPC1_16_SBR_JNE
:
3697 case OPC1_16_SBR_JNZ
:
3698 case OPC1_16_SBR_JNZ_A
:
3699 case OPC1_16_SBR_JZ
:
3700 case OPC1_16_SBR_JZ_A
:
3701 case OPC1_16_SBR_LOOP
:
3702 r1
= MASK_OP_SBR_S2(ctx
->opcode
);
3703 address
= MASK_OP_SBR_DISP4(ctx
->opcode
);
3704 gen_compute_branch(ctx
, op1
, r1
, 0, 0, address
);
3707 case OPC1_16_SC_AND
:
3708 case OPC1_16_SC_BISR
:
3709 case OPC1_16_SC_LD_A
:
3710 case OPC1_16_SC_LD_W
:
3711 case OPC1_16_SC_MOV
:
3713 case OPC1_16_SC_ST_A
:
3714 case OPC1_16_SC_ST_W
:
3715 case OPC1_16_SC_SUB_A
:
3716 decode_sc_opc(ctx
, op1
);
3719 case OPC1_16_SLR_LD_A
:
3720 case OPC1_16_SLR_LD_A_POSTINC
:
3721 case OPC1_16_SLR_LD_BU
:
3722 case OPC1_16_SLR_LD_BU_POSTINC
:
3723 case OPC1_16_SLR_LD_H
:
3724 case OPC1_16_SLR_LD_H_POSTINC
:
3725 case OPC1_16_SLR_LD_W
:
3726 case OPC1_16_SLR_LD_W_POSTINC
:
3727 decode_slr_opc(ctx
, op1
);
3730 case OPC1_16_SRO_LD_A
:
3731 case OPC1_16_SRO_LD_BU
:
3732 case OPC1_16_SRO_LD_H
:
3733 case OPC1_16_SRO_LD_W
:
3734 case OPC1_16_SRO_ST_A
:
3735 case OPC1_16_SRO_ST_B
:
3736 case OPC1_16_SRO_ST_H
:
3737 case OPC1_16_SRO_ST_W
:
3738 decode_sro_opc(ctx
, op1
);
3741 case OPC1_16_SSRO_ST_A
:
3742 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3743 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3744 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3746 case OPC1_16_SSRO_ST_B
:
3747 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3748 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3749 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
, MO_UB
);
3751 case OPC1_16_SSRO_ST_H
:
3752 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3753 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3754 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 2, MO_LESW
);
3756 case OPC1_16_SSRO_ST_W
:
3757 r1
= MASK_OP_SSRO_S1(ctx
->opcode
);
3758 const16
= MASK_OP_SSRO_OFF4(ctx
->opcode
);
3759 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[15], const16
* 4, MO_LESL
);
3762 case OPCM_16_SR_SYSTEM
:
3763 decode_sr_system(ctx
);
3765 case OPCM_16_SR_ACCU
:
3766 decode_sr_accu(ctx
);
3769 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3770 gen_compute_branch(ctx
, op1
, r1
, 0, 0, 0);
3772 case OPC1_16_SR_NOT
:
3773 r1
= MASK_OP_SR_S1D(ctx
->opcode
);
3774 tcg_gen_not_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
]);
3777 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3782 * 32 bit instructions
3786 static void decode_abs_ldw(DisasContext
*ctx
)
3793 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3794 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3795 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3797 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3800 case OPC2_32_ABS_LD_A
:
3801 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3803 case OPC2_32_ABS_LD_D
:
3805 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3807 case OPC2_32_ABS_LD_DA
:
3809 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3811 case OPC2_32_ABS_LD_W
:
3812 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3815 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3819 static void decode_abs_ldb(DisasContext
*ctx
)
3826 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3827 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3828 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3830 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3833 case OPC2_32_ABS_LD_B
:
3834 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_SB
);
3836 case OPC2_32_ABS_LD_BU
:
3837 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3839 case OPC2_32_ABS_LD_H
:
3840 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESW
);
3842 case OPC2_32_ABS_LD_HU
:
3843 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3846 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3850 static void decode_abs_ldst_swap(DisasContext
*ctx
)
3857 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3858 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3859 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3861 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3864 case OPC2_32_ABS_LDMST
:
3865 gen_ldmst(ctx
, r1
, temp
);
3867 case OPC2_32_ABS_SWAP_W
:
3868 gen_swap(ctx
, r1
, temp
);
3871 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3875 static void decode_abs_ldst_context(DisasContext
*ctx
)
3880 off18
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3881 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3884 case OPC2_32_ABS_LDLCX
:
3885 gen_helper_1arg(ldlcx
, EA_ABS_FORMAT(off18
));
3887 case OPC2_32_ABS_LDUCX
:
3888 gen_helper_1arg(lducx
, EA_ABS_FORMAT(off18
));
3890 case OPC2_32_ABS_STLCX
:
3891 gen_helper_1arg(stlcx
, EA_ABS_FORMAT(off18
));
3893 case OPC2_32_ABS_STUCX
:
3894 gen_helper_1arg(stucx
, EA_ABS_FORMAT(off18
));
3897 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3901 static void decode_abs_store(DisasContext
*ctx
)
3908 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3909 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3910 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3912 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3915 case OPC2_32_ABS_ST_A
:
3916 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3918 case OPC2_32_ABS_ST_D
:
3920 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
3922 case OPC2_32_ABS_ST_DA
:
3924 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
3926 case OPC2_32_ABS_ST_W
:
3927 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LESL
);
3930 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3934 static void decode_abs_storeb_h(DisasContext
*ctx
)
3941 r1
= MASK_OP_ABS_S1D(ctx
->opcode
);
3942 address
= MASK_OP_ABS_OFF18(ctx
->opcode
);
3943 op2
= MASK_OP_ABS_OP2(ctx
->opcode
);
3945 temp
= tcg_constant_i32(EA_ABS_FORMAT(address
));
3948 case OPC2_32_ABS_ST_B
:
3949 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_UB
);
3951 case OPC2_32_ABS_ST_H
:
3952 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp
, ctx
->mem_idx
, MO_LEUW
);
3955 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
3961 static void decode_bit_andacc(DisasContext
*ctx
)
3967 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
3968 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
3969 r3
= MASK_OP_BIT_D(ctx
->opcode
);
3970 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
3971 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
3972 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
3976 case OPC2_32_BIT_AND_AND_T
:
3977 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3978 pos1
, pos2
, &tcg_gen_and_tl
, &tcg_gen_and_tl
);
3980 case OPC2_32_BIT_AND_ANDN_T
:
3981 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3982 pos1
, pos2
, &tcg_gen_andc_tl
, &tcg_gen_and_tl
);
3984 case OPC2_32_BIT_AND_NOR_T
:
3985 if (TCG_TARGET_HAS_andc_i32
) {
3986 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3987 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_andc_tl
);
3989 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3990 pos1
, pos2
, &tcg_gen_nor_tl
, &tcg_gen_and_tl
);
3993 case OPC2_32_BIT_AND_OR_T
:
3994 gen_bit_2op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
3995 pos1
, pos2
, &tcg_gen_or_tl
, &tcg_gen_and_tl
);
3998 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4002 static void decode_bit_logical_t(DisasContext
*ctx
)
4007 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4008 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4009 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4010 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4011 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4012 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4015 case OPC2_32_BIT_AND_T
:
4016 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4017 pos1
, pos2
, &tcg_gen_and_tl
);
4019 case OPC2_32_BIT_ANDN_T
:
4020 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4021 pos1
, pos2
, &tcg_gen_andc_tl
);
4023 case OPC2_32_BIT_NOR_T
:
4024 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4025 pos1
, pos2
, &tcg_gen_nor_tl
);
4027 case OPC2_32_BIT_OR_T
:
4028 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4029 pos1
, pos2
, &tcg_gen_or_tl
);
4032 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4036 static void decode_bit_insert(DisasContext
*ctx
)
4042 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4043 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4044 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4045 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4046 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4047 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4049 temp
= tcg_temp_new();
4051 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r2
], pos2
);
4052 if (op2
== OPC2_32_BIT_INSN_T
) {
4053 tcg_gen_not_tl(temp
, temp
);
4055 tcg_gen_deposit_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], temp
, pos1
, 1);
4058 static void decode_bit_logical_t2(DisasContext
*ctx
)
4065 op2
= MASK_OP_BIT_OP2(ctx
->opcode
);
4066 r1
= MASK_OP_BIT_S1(ctx
->opcode
);
4067 r2
= MASK_OP_BIT_S2(ctx
->opcode
);
4068 r3
= MASK_OP_BIT_D(ctx
->opcode
);
4069 pos1
= MASK_OP_BIT_POS1(ctx
->opcode
);
4070 pos2
= MASK_OP_BIT_POS2(ctx
->opcode
);
4073 case OPC2_32_BIT_NAND_T
:
4074 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4075 pos1
, pos2
, &tcg_gen_nand_tl
);
4077 case OPC2_32_BIT_ORN_T
:
4078 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4079 pos1
, pos2
, &tcg_gen_orc_tl
);
4081 case OPC2_32_BIT_XNOR_T
:
4082 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4083 pos1
, pos2
, &tcg_gen_eqv_tl
);
4085 case OPC2_32_BIT_XOR_T
:
4086 gen_bit_1op(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
],
4087 pos1
, pos2
, &tcg_gen_xor_tl
);
4090 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
static void decode_bit_orand(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    switch (op2) {
    case OPC2_32_BIT_OR_AND_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl);
        break;
    case OPC2_32_BIT_OR_ANDN_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
        break;
    case OPC2_32_BIT_OR_NOR_T:
        if (TCG_TARGET_HAS_orc_i32) {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
        } else {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl);
        }
        break;
    case OPC2_32_BIT_OR_OR_T:
        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
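/*
 * SH.<op>.T: compute the bit operation on D[r1][pos1] and D[r2][pos2]
 * into a temporary, then shift D[r3] left by one and add the single-bit
 * result, i.e. the condition bit is shifted into the LSB of D[r3].
 */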
static void decode_bit_sh_logic1(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;
    TCGv temp;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_BIT_SH_AND_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_and_tl);
        break;
    case OPC2_32_BIT_SH_ANDN_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_andc_tl);
        break;
    case OPC2_32_BIT_SH_NOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nor_tl);
        break;
    case OPC2_32_BIT_SH_OR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_or_tl);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
    tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
    tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
}
static void decode_bit_sh_logic2(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int pos1, pos2;
    TCGv temp;

    op2 = MASK_OP_BIT_OP2(ctx->opcode);
    r1 = MASK_OP_BIT_S1(ctx->opcode);
    r2 = MASK_OP_BIT_S2(ctx->opcode);
    r3 = MASK_OP_BIT_D(ctx->opcode);
    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
    pos2 = MASK_OP_BIT_POS2(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_BIT_SH_NAND_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_nand_tl);
        break;
    case OPC2_32_BIT_SH_ORN_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_orc_tl);
        break;
    case OPC2_32_BIT_SH_XNOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_eqv_tl);
        break;
    case OPC2_32_BIT_SH_XOR_T:
        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
                    pos1, pos2, &tcg_gen_xor_tl);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
    tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
    tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
}
4218 static void decode_bo_addrmode_post_pre_base(DisasContext
*ctx
)
4225 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4226 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4227 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4228 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4231 case OPC2_32_BO_CACHEA_WI_SHORTOFF
:
4232 case OPC2_32_BO_CACHEA_W_SHORTOFF
:
4233 case OPC2_32_BO_CACHEA_I_SHORTOFF
:
4234 /* instruction to access the cache */
4236 case OPC2_32_BO_CACHEA_WI_POSTINC
:
4237 case OPC2_32_BO_CACHEA_W_POSTINC
:
4238 case OPC2_32_BO_CACHEA_I_POSTINC
:
4239 /* instruction to access the cache, but we still need to handle
4240 the addressing mode */
4241 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4243 case OPC2_32_BO_CACHEA_WI_PREINC
:
4244 case OPC2_32_BO_CACHEA_W_PREINC
:
4245 case OPC2_32_BO_CACHEA_I_PREINC
:
4246 /* instruction to access the cache, but we still need to handle
4247 the addressing mode */
4248 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4250 case OPC2_32_BO_CACHEI_WI_SHORTOFF
:
4251 case OPC2_32_BO_CACHEI_W_SHORTOFF
:
4252 if (!has_feature(ctx
, TRICORE_FEATURE_131
)) {
4253 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4256 case OPC2_32_BO_CACHEI_W_POSTINC
:
4257 case OPC2_32_BO_CACHEI_WI_POSTINC
:
4258 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4259 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4261 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4264 case OPC2_32_BO_CACHEI_W_PREINC
:
4265 case OPC2_32_BO_CACHEI_WI_PREINC
:
4266 if (has_feature(ctx
, TRICORE_FEATURE_131
)) {
4267 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4269 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4272 case OPC2_32_BO_ST_A_SHORTOFF
:
4273 gen_offset_st(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4275 case OPC2_32_BO_ST_A_POSTINC
:
4276 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4278 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4280 case OPC2_32_BO_ST_A_PREINC
:
4281 gen_st_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESL
);
4283 case OPC2_32_BO_ST_B_SHORTOFF
:
4284 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4286 case OPC2_32_BO_ST_B_POSTINC
:
4287 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4289 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4291 case OPC2_32_BO_ST_B_PREINC
:
4292 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4294 case OPC2_32_BO_ST_D_SHORTOFF
:
4296 gen_offset_st_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4299 case OPC2_32_BO_ST_D_POSTINC
:
4301 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4302 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4304 case OPC2_32_BO_ST_D_PREINC
:
4306 temp
= tcg_temp_new();
4307 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4308 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4309 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4311 case OPC2_32_BO_ST_DA_SHORTOFF
:
4313 gen_offset_st_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4316 case OPC2_32_BO_ST_DA_POSTINC
:
4318 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4319 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4321 case OPC2_32_BO_ST_DA_PREINC
:
4323 temp
= tcg_temp_new();
4324 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4325 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4326 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4328 case OPC2_32_BO_ST_H_SHORTOFF
:
4329 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4331 case OPC2_32_BO_ST_H_POSTINC
:
4332 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4334 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4336 case OPC2_32_BO_ST_H_PREINC
:
4337 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4339 case OPC2_32_BO_ST_Q_SHORTOFF
:
4340 temp
= tcg_temp_new();
4341 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4342 gen_offset_st(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4344 case OPC2_32_BO_ST_Q_POSTINC
:
4345 temp
= tcg_temp_new();
4346 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4347 tcg_gen_qemu_st_tl(temp
, cpu_gpr_a
[r2
], ctx
->mem_idx
,
4349 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4351 case OPC2_32_BO_ST_Q_PREINC
:
4352 temp
= tcg_temp_new();
4353 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4354 gen_st_preincr(ctx
, temp
, cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4356 case OPC2_32_BO_ST_W_SHORTOFF
:
4357 gen_offset_st(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4359 case OPC2_32_BO_ST_W_POSTINC
:
4360 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4362 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4364 case OPC2_32_BO_ST_W_PREINC
:
4365 gen_st_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4368 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4372 static void decode_bo_addrmode_bitreverse_circular(DisasContext
*ctx
)
4377 TCGv temp
, temp2
, t_off10
;
4379 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4380 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4381 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4382 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4384 temp
= tcg_temp_new();
4385 temp2
= tcg_temp_new();
4386 t_off10
= tcg_constant_i32(off10
);
4388 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4389 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4392 case OPC2_32_BO_CACHEA_WI_BR
:
4393 case OPC2_32_BO_CACHEA_W_BR
:
4394 case OPC2_32_BO_CACHEA_I_BR
:
4395 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4397 case OPC2_32_BO_CACHEA_WI_CIRC
:
4398 case OPC2_32_BO_CACHEA_W_CIRC
:
4399 case OPC2_32_BO_CACHEA_I_CIRC
:
4400 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4402 case OPC2_32_BO_ST_A_BR
:
4403 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4404 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4406 case OPC2_32_BO_ST_A_CIRC
:
4407 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4408 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4410 case OPC2_32_BO_ST_B_BR
:
4411 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4412 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4414 case OPC2_32_BO_ST_B_CIRC
:
4415 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4416 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4418 case OPC2_32_BO_ST_D_BR
:
4420 gen_st_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4421 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4423 case OPC2_32_BO_ST_D_CIRC
:
4425 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4426 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4427 tcg_gen_addi_tl(temp
, temp
, 4);
4428 tcg_gen_rem_tl(temp
, temp
, temp2
);
4429 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4430 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4431 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4433 case OPC2_32_BO_ST_DA_BR
:
4435 gen_st_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4436 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4438 case OPC2_32_BO_ST_DA_CIRC
:
4440 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4441 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4442 tcg_gen_addi_tl(temp
, temp
, 4);
4443 tcg_gen_rem_tl(temp
, temp
, temp2
);
4444 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4445 tcg_gen_qemu_st_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4446 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4448 case OPC2_32_BO_ST_H_BR
:
4449 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4450 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4452 case OPC2_32_BO_ST_H_CIRC
:
4453 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4454 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4456 case OPC2_32_BO_ST_Q_BR
:
4457 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4458 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4459 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4461 case OPC2_32_BO_ST_Q_CIRC
:
4462 tcg_gen_shri_tl(temp
, cpu_gpr_d
[r1
], 16);
4463 tcg_gen_qemu_st_tl(temp
, temp2
, ctx
->mem_idx
, MO_LEUW
);
4464 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4466 case OPC2_32_BO_ST_W_BR
:
4467 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4468 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4470 case OPC2_32_BO_ST_W_CIRC
:
4471 tcg_gen_qemu_st_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4472 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4475 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
4479 static void decode_bo_addrmode_ld_post_pre_base(DisasContext
*ctx
)
4486 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4487 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4488 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4489 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4492 case OPC2_32_BO_LD_A_SHORTOFF
:
4493 gen_offset_ld(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4495 case OPC2_32_BO_LD_A_POSTINC
:
4496 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4498 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4500 case OPC2_32_BO_LD_A_PREINC
:
4501 gen_ld_preincr(ctx
, cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4503 case OPC2_32_BO_LD_B_SHORTOFF
:
4504 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4506 case OPC2_32_BO_LD_B_POSTINC
:
4507 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4509 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4511 case OPC2_32_BO_LD_B_PREINC
:
4512 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_SB
);
4514 case OPC2_32_BO_LD_BU_SHORTOFF
:
4515 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4517 case OPC2_32_BO_LD_BU_POSTINC
:
4518 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4520 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4522 case OPC2_32_BO_LD_BU_PREINC
:
4523 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_UB
);
4525 case OPC2_32_BO_LD_D_SHORTOFF
:
4527 gen_offset_ld_2regs(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
],
4530 case OPC2_32_BO_LD_D_POSTINC
:
4532 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
);
4533 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4535 case OPC2_32_BO_LD_D_PREINC
:
4537 temp
= tcg_temp_new();
4538 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4539 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp
, ctx
);
4540 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4542 case OPC2_32_BO_LD_DA_SHORTOFF
:
4544 gen_offset_ld_2regs(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
],
4547 case OPC2_32_BO_LD_DA_POSTINC
:
4549 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], cpu_gpr_a
[r2
], ctx
);
4550 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4552 case OPC2_32_BO_LD_DA_PREINC
:
4554 temp
= tcg_temp_new();
4555 tcg_gen_addi_tl(temp
, cpu_gpr_a
[r2
], off10
);
4556 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp
, ctx
);
4557 tcg_gen_mov_tl(cpu_gpr_a
[r2
], temp
);
4559 case OPC2_32_BO_LD_H_SHORTOFF
:
4560 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4562 case OPC2_32_BO_LD_H_POSTINC
:
4563 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4565 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4567 case OPC2_32_BO_LD_H_PREINC
:
4568 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LESW
);
4570 case OPC2_32_BO_LD_HU_SHORTOFF
:
4571 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4573 case OPC2_32_BO_LD_HU_POSTINC
:
4574 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4576 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4578 case OPC2_32_BO_LD_HU_PREINC
:
4579 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4581 case OPC2_32_BO_LD_Q_SHORTOFF
:
4582 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4583 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4585 case OPC2_32_BO_LD_Q_POSTINC
:
4586 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4588 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4589 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4591 case OPC2_32_BO_LD_Q_PREINC
:
4592 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUW
);
4593 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4595 case OPC2_32_BO_LD_W_SHORTOFF
:
4596 gen_offset_ld(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4598 case OPC2_32_BO_LD_W_POSTINC
:
4599 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], ctx
->mem_idx
,
4601 tcg_gen_addi_tl(cpu_gpr_a
[r2
], cpu_gpr_a
[r2
], off10
);
4603 case OPC2_32_BO_LD_W_PREINC
:
4604 gen_ld_preincr(ctx
, cpu_gpr_d
[r1
], cpu_gpr_a
[r2
], off10
, MO_LEUL
);
4607 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
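/*
 * Bit-reverse (BR) and circular (CIRC) addressing use an address register
 * pair: A[b] holds the buffer base and the low 16 bits of A[b+1] the
 * current index, so the effective address is A[b] + (A[b+1] & 0xffff).
 * After the access, gen_helper_br_update()/gen_helper_circ_update()
 * compute the next index; for CIRC it wraps modulo the buffer length
 * kept in the upper 16 bits of A[b+1].
 */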
4611 static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext
*ctx
)
4616 TCGv temp
, temp2
, t_off10
;
4618 r1
= MASK_OP_BO_S1D(ctx
->opcode
);
4619 r2
= MASK_OP_BO_S2(ctx
->opcode
);
4620 off10
= MASK_OP_BO_OFF10_SEXT(ctx
->opcode
);
4621 op2
= MASK_OP_BO_OP2(ctx
->opcode
);
4623 temp
= tcg_temp_new();
4624 temp2
= tcg_temp_new();
4625 t_off10
= tcg_constant_i32(off10
);
4627 tcg_gen_ext16u_tl(temp
, cpu_gpr_a
[r2
+1]);
4628 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4632 case OPC2_32_BO_LD_A_BR
:
4633 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4634 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4636 case OPC2_32_BO_LD_A_CIRC
:
4637 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4638 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4640 case OPC2_32_BO_LD_B_BR
:
4641 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4642 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4644 case OPC2_32_BO_LD_B_CIRC
:
4645 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_SB
);
4646 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4648 case OPC2_32_BO_LD_BU_BR
:
4649 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4650 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4652 case OPC2_32_BO_LD_BU_CIRC
:
4653 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_UB
);
4654 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4656 case OPC2_32_BO_LD_D_BR
:
4658 gen_ld_2regs_64(cpu_gpr_d
[r1
+1], cpu_gpr_d
[r1
], temp2
, ctx
);
4659 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4661 case OPC2_32_BO_LD_D_CIRC
:
4663 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4664 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4665 tcg_gen_addi_tl(temp
, temp
, 4);
4666 tcg_gen_rem_tl(temp
, temp
, temp2
);
4667 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4668 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4669 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4671 case OPC2_32_BO_LD_DA_BR
:
4673 gen_ld_2regs_64(cpu_gpr_a
[r1
+1], cpu_gpr_a
[r1
], temp2
, ctx
);
4674 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4676 case OPC2_32_BO_LD_DA_CIRC
:
4678 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4679 tcg_gen_shri_tl(temp2
, cpu_gpr_a
[r2
+1], 16);
4680 tcg_gen_addi_tl(temp
, temp
, 4);
4681 tcg_gen_rem_tl(temp
, temp
, temp2
);
4682 tcg_gen_add_tl(temp2
, cpu_gpr_a
[r2
], temp
);
4683 tcg_gen_qemu_ld_tl(cpu_gpr_a
[r1
+1], temp2
, ctx
->mem_idx
, MO_LEUL
);
4684 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4686 case OPC2_32_BO_LD_H_BR
:
4687 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4688 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4690 case OPC2_32_BO_LD_H_CIRC
:
4691 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LESW
);
4692 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4694 case OPC2_32_BO_LD_HU_BR
:
4695 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4696 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4698 case OPC2_32_BO_LD_HU_CIRC
:
4699 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4700 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4702 case OPC2_32_BO_LD_Q_BR
:
4703 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4704 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4705 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4707 case OPC2_32_BO_LD_Q_CIRC
:
4708 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUW
);
4709 tcg_gen_shli_tl(cpu_gpr_d
[r1
], cpu_gpr_d
[r1
], 16);
4710 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4712 case OPC2_32_BO_LD_W_BR
:
4713 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4714 gen_helper_br_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1]);
4716 case OPC2_32_BO_LD_W_CIRC
:
4717 tcg_gen_qemu_ld_tl(cpu_gpr_d
[r1
], temp2
, ctx
->mem_idx
, MO_LEUL
);
4718 gen_helper_circ_update(cpu_gpr_a
[r2
+1], cpu_gpr_a
[r2
+1], t_off10
);
4721 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
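/*
 * Base+offset addressing variants: SHORTOFF forms use A[b] + off10
 * without modifying A[b], POSTINC forms use A[b] as the effective address
 * and add off10 afterwards, and PREINC forms update A[b] by off10 first
 * and then use the incremented register as the effective address.
 */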
static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t off10;
    int r1, r2;
    TCGv temp;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_BO_LDLCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_ldlcx(tcg_env, temp);
        break;
    case OPC2_32_BO_LDMST_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_ldmst(ctx, r1, temp);
        break;
    case OPC2_32_BO_LDMST_POSTINC:
        gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_LDMST_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
        break;
    case OPC2_32_BO_LDUCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_lducx(tcg_env, temp);
        break;
    case OPC2_32_BO_LEA_SHORTOFF:
        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_STLCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_stlcx(tcg_env, temp);
        break;
    case OPC2_32_BO_STUCX_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_helper_stucx(tcg_env, temp);
        break;
    case OPC2_32_BO_SWAP_W_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_swap(ctx, r1, temp);
        break;
    case OPC2_32_BO_SWAP_W_POSTINC:
        gen_swap(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_SWAP_W_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_swap(ctx, r1, cpu_gpr_a[r2]);
        break;
    case OPC2_32_BO_CMPSWAP_W_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_cmpswap(ctx, r1, temp);
        break;
    case OPC2_32_BO_CMPSWAP_W_POSTINC:
        gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_CMPSWAP_W_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
        break;
    case OPC2_32_BO_SWAPMSK_W_SHORTOFF:
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
        gen_swapmsk(ctx, r1, temp);
        break;
    case OPC2_32_BO_SWAPMSK_W_POSTINC:
        gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        break;
    case OPC2_32_BO_SWAPMSK_W_PREINC:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
        gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t off10;
    int r1, r2;
    TCGv temp, temp2, t_off10;

    r1 = MASK_OP_BO_S1D(ctx->opcode);
    r2 = MASK_OP_BO_S2(ctx->opcode);
    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
    op2 = MASK_OP_BO_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();
    t_off10 = tcg_constant_i32(off10);

    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);

    switch (op2) {
    case OPC2_32_BO_LDMST_BR:
        gen_ldmst(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_LDMST_CIRC:
        gen_ldmst(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
        break;
    case OPC2_32_BO_SWAP_W_BR:
        gen_swap(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_SWAP_W_CIRC:
        gen_swap(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
        break;
    case OPC2_32_BO_CMPSWAP_W_BR:
        gen_cmpswap(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_CMPSWAP_W_CIRC:
        gen_cmpswap(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
        break;
    case OPC2_32_BO_SWAPMSK_W_BR:
        gen_swapmsk(ctx, r1, temp2);
        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
        break;
    case OPC2_32_BO_SWAPMSK_W_CIRC:
        gen_swapmsk(ctx, r1, temp2);
        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
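/*
 * BOL format: a 16-bit sign-extended offset relative to A[b]. The ST.A
 * form and the byte/halfword load/store forms are only available from
 * TriCore 1.6 on and raise an IOPC trap on older cores, hence the
 * has_feature() checks below.
 */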
static void decode_bol_opc(DisasContext *ctx, int32_t op1)
{
    int r1, r2;
    int32_t address;
    TCGv temp;

    r1 = MASK_OP_BOL_S1D(ctx->opcode);
    r2 = MASK_OP_BOL_S2(ctx->opcode);
    address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode);

    switch (op1) {
    case OPC1_32_BOL_LD_A_LONGOFF:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
        break;
    case OPC1_32_BOL_LD_W_LONGOFF:
        temp = tcg_temp_new();
        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
        break;
    case OPC1_32_BOL_LEA_LONGOFF:
        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
        break;
    case OPC1_32_BOL_ST_A_LONGOFF:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], address, MO_LEUL);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC1_32_BOL_ST_W_LONGOFF:
        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUL);
        break;
    case OPC1_32_BOL_LD_B_LONGOFF:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC1_32_BOL_LD_BU_LONGOFF:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_UB);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC1_32_BOL_LD_H_LONGOFF:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC1_32_BOL_LD_HU_LONGOFF:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUW);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC1_32_BOL_ST_B_LONGOFF:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC1_32_BOL_ST_H_LONGOFF:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rc_logical_shift(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2;
    int32_t const9;
    TCGv temp;

    r2 = MASK_OP_RC_D(ctx->opcode);
    r1 = MASK_OP_RC_S1(ctx->opcode);
    const9 = MASK_OP_RC_CONST9(ctx->opcode);
    op2 = MASK_OP_RC_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_RC_AND:
        tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ANDN:
        tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
        break;
    case OPC2_32_RC_NAND:
        temp = tcg_temp_new();
        tcg_gen_movi_tl(temp, const9);
        tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_NOR:
        temp = tcg_temp_new();
        tcg_gen_movi_tl(temp, const9);
        tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
        break;
    case OPC2_32_RC_OR:
        tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_ORN:
        tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
        break;
    case OPC2_32_RC_SH:
        const9 = sextract32(const9, 0, 6);
        gen_shi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SH_H:
        const9 = sextract32(const9, 0, 5);
        gen_sh_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SHA:
        const9 = sextract32(const9, 0, 6);
        gen_shaci(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SHA_H:
        const9 = sextract32(const9, 0, 5);
        gen_sha_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SHAS:
        gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_XNOR:
        tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RC_XOR:
        tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_SHUFFLE:
        if (has_feature(ctx, TRICORE_FEATURE_162)) {
            temp = tcg_constant_i32(const9);
            gen_helper_shuffle(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
5027 static void decode_rc_accumulator(DisasContext
*ctx
)
5035 r2
= MASK_OP_RC_D(ctx
->opcode
);
5036 r1
= MASK_OP_RC_S1(ctx
->opcode
);
5037 const9
= MASK_OP_RC_CONST9_SEXT(ctx
->opcode
);
5039 op2
= MASK_OP_RC_OP2(ctx
->opcode
);
5041 temp
= tcg_temp_new();
5044 case OPC2_32_RC_ABSDIF
:
5045 gen_absdifi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5047 case OPC2_32_RC_ABSDIFS
:
5048 gen_absdifsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5050 case OPC2_32_RC_ADD
:
5051 gen_addi_d(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5053 case OPC2_32_RC_ADDC
:
5054 gen_addci_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5056 case OPC2_32_RC_ADDS
:
5057 gen_addsi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5059 case OPC2_32_RC_ADDS_U
:
5060 gen_addsui(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5062 case OPC2_32_RC_ADDX
:
5063 gen_addi_CC(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5065 case OPC2_32_RC_AND_EQ
:
5066 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5067 const9
, &tcg_gen_and_tl
);
5069 case OPC2_32_RC_AND_GE
:
5070 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5071 const9
, &tcg_gen_and_tl
);
5073 case OPC2_32_RC_AND_GE_U
:
5074 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5075 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5076 const9
, &tcg_gen_and_tl
);
5078 case OPC2_32_RC_AND_LT
:
5079 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5080 const9
, &tcg_gen_and_tl
);
5082 case OPC2_32_RC_AND_LT_U
:
5083 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5084 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5085 const9
, &tcg_gen_and_tl
);
5087 case OPC2_32_RC_AND_NE
:
5088 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5089 const9
, &tcg_gen_and_tl
);
5092 tcg_gen_setcondi_tl(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5094 case OPC2_32_RC_EQANY_B
:
5095 gen_eqany_bi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5097 case OPC2_32_RC_EQANY_H
:
5098 gen_eqany_hi(cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5101 tcg_gen_setcondi_tl(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5103 case OPC2_32_RC_GE_U
:
5104 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5105 tcg_gen_setcondi_tl(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5108 tcg_gen_setcondi_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5110 case OPC2_32_RC_LT_U
:
5111 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5112 tcg_gen_setcondi_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5114 case OPC2_32_RC_MAX
:
5115 tcg_gen_movi_tl(temp
, const9
);
5116 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5117 cpu_gpr_d
[r1
], temp
);
5119 case OPC2_32_RC_MAX_U
:
5120 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5121 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5122 cpu_gpr_d
[r1
], temp
);
5124 case OPC2_32_RC_MIN
:
5125 tcg_gen_movi_tl(temp
, const9
);
5126 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5127 cpu_gpr_d
[r1
], temp
);
5129 case OPC2_32_RC_MIN_U
:
5130 tcg_gen_movi_tl(temp
, MASK_OP_RC_CONST9(ctx
->opcode
));
5131 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], temp
,
5132 cpu_gpr_d
[r1
], temp
);
5135 tcg_gen_setcondi_tl(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5137 case OPC2_32_RC_OR_EQ
:
5138 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5139 const9
, &tcg_gen_or_tl
);
5141 case OPC2_32_RC_OR_GE
:
5142 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5143 const9
, &tcg_gen_or_tl
);
5145 case OPC2_32_RC_OR_GE_U
:
5146 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5147 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5148 const9
, &tcg_gen_or_tl
);
5150 case OPC2_32_RC_OR_LT
:
5151 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5152 const9
, &tcg_gen_or_tl
);
5154 case OPC2_32_RC_OR_LT_U
:
5155 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5156 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5157 const9
, &tcg_gen_or_tl
);
5159 case OPC2_32_RC_OR_NE
:
5160 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5161 const9
, &tcg_gen_or_tl
);
5163 case OPC2_32_RC_RSUB
:
5164 tcg_gen_movi_tl(temp
, const9
);
5165 gen_sub_d(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5167 case OPC2_32_RC_RSUBS
:
5168 tcg_gen_movi_tl(temp
, const9
);
5169 gen_subs(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5171 case OPC2_32_RC_RSUBS_U
:
5172 tcg_gen_movi_tl(temp
, const9
);
5173 gen_subsu(cpu_gpr_d
[r2
], temp
, cpu_gpr_d
[r1
]);
5175 case OPC2_32_RC_SH_EQ
:
5176 gen_sh_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5178 case OPC2_32_RC_SH_GE
:
5179 gen_sh_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5181 case OPC2_32_RC_SH_GE_U
:
5182 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5183 gen_sh_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5185 case OPC2_32_RC_SH_LT
:
5186 gen_sh_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5188 case OPC2_32_RC_SH_LT_U
:
5189 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5190 gen_sh_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5192 case OPC2_32_RC_SH_NE
:
5193 gen_sh_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], const9
);
5195 case OPC2_32_RC_XOR_EQ
:
5196 gen_accumulating_condi(TCG_COND_EQ
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5197 const9
, &tcg_gen_xor_tl
);
5199 case OPC2_32_RC_XOR_GE
:
5200 gen_accumulating_condi(TCG_COND_GE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5201 const9
, &tcg_gen_xor_tl
);
5203 case OPC2_32_RC_XOR_GE_U
:
5204 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5205 gen_accumulating_condi(TCG_COND_GEU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5206 const9
, &tcg_gen_xor_tl
);
5208 case OPC2_32_RC_XOR_LT
:
5209 gen_accumulating_condi(TCG_COND_LT
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5210 const9
, &tcg_gen_xor_tl
);
5212 case OPC2_32_RC_XOR_LT_U
:
5213 const9
= MASK_OP_RC_CONST9(ctx
->opcode
);
5214 gen_accumulating_condi(TCG_COND_LTU
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5215 const9
, &tcg_gen_xor_tl
);
5217 case OPC2_32_RC_XOR_NE
:
5218 gen_accumulating_condi(TCG_COND_NE
, cpu_gpr_d
[r2
], cpu_gpr_d
[r1
],
5219 const9
, &tcg_gen_xor_tl
);
5222 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
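/*
 * BISR is privileged: it is only translated when the context is in
 * supervisor mode and raises a PRIV trap otherwise. SYSCALL always
 * generates a system-call trap whose TIN is the low 8 bits of const9.
 */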
static void decode_rc_serviceroutine(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t const9;

    op2 = MASK_OP_RC_OP2(ctx->opcode);
    const9 = MASK_OP_RC_CONST9(ctx->opcode);

    switch (op2) {
    case OPC2_32_RC_BISR:
        if (ctx->priv == TRICORE_PRIV_SM) {
            gen_helper_1arg(bisr, const9);
        } else {
            generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
        }
        break;
    case OPC2_32_RC_SYSCALL:
        generate_trap(ctx, TRAPC_SYSCALL, const9 & 0xff);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rc_mul(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2;
    int32_t const9;

    r2 = MASK_OP_RC_D(ctx->opcode);
    r1 = MASK_OP_RC_S1(ctx->opcode);
    const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);

    op2 = MASK_OP_RC_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_RC_MUL_32:
        gen_muli_i32s(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MUL_64:
        CHECK_REG_PAIR(r2);
        gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MULS_32:
        gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MUL_U_64:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        CHECK_REG_PAIR(r2);
        gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
        break;
    case OPC2_32_RC_MULS_U_32:
        const9 = MASK_OP_RC_CONST9(ctx->opcode);
        gen_mulsui_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
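/*
 * RCPW IMASK/INSERT: pos and width select a bit field. IMASK writes the
 * register pair: D[r2+1] gets the mask ((1 << width) - 1) << pos and
 * D[r2] the shifted constant const4 << pos. INSERT deposits const4 into
 * D[r1] at [pos, pos + width). For pos + width > 32 the result is
 * architecturally undefined, so nothing is written in that case.
 */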
static void decode_rcpw_insert(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2;
    int32_t pos, width, const4;

    TCGv temp;

    op2 = MASK_OP_RCPW_OP2(ctx->opcode);
    r1 = MASK_OP_RCPW_S1(ctx->opcode);
    r2 = MASK_OP_RCPW_D(ctx->opcode);
    const4 = MASK_OP_RCPW_CONST4(ctx->opcode);
    width = MASK_OP_RCPW_WIDTH(ctx->opcode);
    pos = MASK_OP_RCPW_POS(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCPW_IMASK:
        CHECK_REG_PAIR(r2);
        /* if pos + width > 32 undefined result */
        if (pos + width <= 32) {
            tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos);
            tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos));
        }
        break;
    case OPC2_32_RCPW_INSERT:
        /* tcg_gen_deposit_tl() does not handle the case of width = 0 */
        if (width == 0) {
            tcg_gen_mov_tl(cpu_gpr_d[r2], cpu_gpr_d[r1]);
        /* if pos + width > 32 undefined result */
        } else if (pos + width <= 32) {
            temp = tcg_constant_i32(const4);
            tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
        }
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rcrw_insert(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t width, const4;

    TCGv temp, temp2, temp3;

    op2 = MASK_OP_RCRW_OP2(ctx->opcode);
    r1 = MASK_OP_RCRW_S1(ctx->opcode);
    r3 = MASK_OP_RCRW_S3(ctx->opcode);
    r4 = MASK_OP_RCRW_D(ctx->opcode);
    width = MASK_OP_RCRW_WIDTH(ctx->opcode);
    const4 = MASK_OP_RCRW_CONST4(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RCRW_IMASK:
        CHECK_REG_PAIR(r4);
        tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
        tcg_gen_movi_tl(temp2, (1 << width) - 1);
        tcg_gen_shl_tl(cpu_gpr_d[r4 + 1], temp2, temp);
        tcg_gen_movi_tl(temp2, const4);
        tcg_gen_shl_tl(cpu_gpr_d[r4], temp2, temp);
        break;
    case OPC2_32_RCRW_INSERT:
        temp3 = tcg_temp_new();

        tcg_gen_movi_tl(temp, width);
        tcg_gen_movi_tl(temp2, const4);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);
        gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], temp2, temp, temp3);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
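/*
 * Conditional add/select with a 9-bit constant: CADD/CADDN write
 * D[r1] + const9 to D[r4] when D[r3] is non-zero/zero and D[r1]
 * otherwise; SEL/SELN write D[r1] or const9 to D[r4] depending on
 * whether D[r3] is non-zero/zero.
 */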
static void decode_rcr_cond_select(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t const9;

    TCGv temp, temp2;

    op2 = MASK_OP_RCR_OP2(ctx->opcode);
    r1 = MASK_OP_RCR_S1(ctx->opcode);
    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
    r3 = MASK_OP_RCR_S3(ctx->opcode);
    r4 = MASK_OP_RCR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCR_CADD:
        gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const9, cpu_gpr_d[r4],
                      cpu_gpr_d[r3]);
        break;
    case OPC2_32_RCR_CADDN:
        gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const9, cpu_gpr_d[r4],
                      cpu_gpr_d[r3]);
        break;
    case OPC2_32_RCR_SEL:
        temp = tcg_constant_i32(0);
        temp2 = tcg_constant_i32(const9);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], temp2);
        break;
    case OPC2_32_RCR_SELN:
        temp = tcg_constant_i32(0);
        temp2 = tcg_constant_i32(const9);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], temp2);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rcr_madd(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t const9;

    op2 = MASK_OP_RCR_OP2(ctx->opcode);
    r1 = MASK_OP_RCR_S1(ctx->opcode);
    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
    r3 = MASK_OP_RCR_S3(ctx->opcode);
    r4 = MASK_OP_RCR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCR_MADD_32:
        gen_maddi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MADD_64:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MADDS_32:
        gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MADDS_64:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MADD_U_64:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MADDS_U_32:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_maddsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MADDS_U_64:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rcr_msub(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r3, r4;
    int32_t const9;

    op2 = MASK_OP_RCR_OP2(ctx->opcode);
    r1 = MASK_OP_RCR_S1(ctx->opcode);
    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
    r3 = MASK_OP_RCR_S3(ctx->opcode);
    r4 = MASK_OP_RCR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RCR_MSUB_32:
        gen_msubi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MSUB_64:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MSUBS_32:
        gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MSUBS_64:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MSUB_U_64:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    case OPC2_32_RCR_MSUBS_U_32:
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_msubsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
        break;
    case OPC2_32_RCR_MSUBS_U_64:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
        gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
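/*
 * RLC format: const16 is sign-extended by default; MFCR/MTCR/MOV.U
 * re-read it zero-extended. MOV.64 puts the constant in D[r2] and its
 * sign extension (const16 >> 15 of the sign-extended value) in D[r2+1];
 * ADDIH/MOV.H/MOVH.A place const16 in the upper halfword.
 */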
static void decode_rlc_opc(DisasContext *ctx,
                           uint32_t op1)
{
    int32_t const16;
    int r1, r2;

    const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode);
    r1 = MASK_OP_RLC_S1(ctx->opcode);
    r2 = MASK_OP_RLC_D(ctx->opcode);

    switch (op1) {
    case OPC1_32_RLC_ADDI:
        gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16);
        break;
    case OPC1_32_RLC_ADDIH:
        gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16);
        break;
    case OPC1_32_RLC_ADDIH_A:
        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
        break;
    case OPC1_32_RLC_MFCR:
        const16 = MASK_OP_RLC_CONST16(ctx->opcode);
        gen_mfcr(ctx, cpu_gpr_d[r2], const16);
        break;
    case OPC1_32_RLC_MOV:
        tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
        break;
    case OPC1_32_RLC_MOV_64:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            CHECK_REG_PAIR(r2);
            tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
            tcg_gen_movi_tl(cpu_gpr_d[r2+1], const16 >> 15);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC1_32_RLC_MOV_U:
        const16 = MASK_OP_RLC_CONST16(ctx->opcode);
        tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
        break;
    case OPC1_32_RLC_MOV_H:
        tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16);
        break;
    case OPC1_32_RLC_MOVH_A:
        tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16);
        break;
    case OPC1_32_RLC_MTCR:
        const16 = MASK_OP_RLC_CONST16(ctx->opcode);
        gen_mtcr(ctx, cpu_gpr_d[r1], const16);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
5579 static void decode_rr_accumulator(DisasContext
*ctx
)
5586 r3
= MASK_OP_RR_D(ctx
->opcode
);
5587 r2
= MASK_OP_RR_S2(ctx
->opcode
);
5588 r1
= MASK_OP_RR_S1(ctx
->opcode
);
5589 op2
= MASK_OP_RR_OP2(ctx
->opcode
);
5592 case OPC2_32_RR_ABS
:
5593 gen_abs(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5595 case OPC2_32_RR_ABS_B
:
5596 gen_helper_abs_b(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r2
]);
5598 case OPC2_32_RR_ABS_H
:
5599 gen_helper_abs_h(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r2
]);
5601 case OPC2_32_RR_ABSDIF
:
5602 gen_absdif(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5604 case OPC2_32_RR_ABSDIF_B
:
5605 gen_helper_absdif_b(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
],
5608 case OPC2_32_RR_ABSDIF_H
:
5609 gen_helper_absdif_h(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
],
5612 case OPC2_32_RR_ABSDIFS
:
5613 gen_helper_absdif_ssov(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
],
5616 case OPC2_32_RR_ABSDIFS_H
:
5617 gen_helper_absdif_h_ssov(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
],
5620 case OPC2_32_RR_ABSS
:
5621 gen_helper_abs_ssov(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r2
]);
5623 case OPC2_32_RR_ABSS_H
:
5624 gen_helper_abs_h_ssov(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r2
]);
5626 case OPC2_32_RR_ADD
:
5627 gen_add_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5629 case OPC2_32_RR_ADD_B
:
5630 gen_helper_add_b(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5632 case OPC2_32_RR_ADD_H
:
5633 gen_helper_add_h(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5635 case OPC2_32_RR_ADDC
:
5636 gen_addc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5638 case OPC2_32_RR_ADDS
:
5639 gen_adds(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5641 case OPC2_32_RR_ADDS_H
:
5642 gen_helper_add_h_ssov(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
],
5645 case OPC2_32_RR_ADDS_HU
:
5646 gen_helper_add_h_suov(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
],
5649 case OPC2_32_RR_ADDS_U
:
5650 gen_helper_add_suov(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
],
5653 case OPC2_32_RR_ADDX
:
5654 gen_add_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5656 case OPC2_32_RR_AND_EQ
:
5657 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5658 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5660 case OPC2_32_RR_AND_GE
:
5661 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5662 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5664 case OPC2_32_RR_AND_GE_U
:
5665 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5666 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5668 case OPC2_32_RR_AND_LT
:
5669 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5670 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5672 case OPC2_32_RR_AND_LT_U
:
5673 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5674 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5676 case OPC2_32_RR_AND_NE
:
5677 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5678 cpu_gpr_d
[r2
], &tcg_gen_and_tl
);
5681 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5684 case OPC2_32_RR_EQ_B
:
5685 gen_helper_eq_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5687 case OPC2_32_RR_EQ_H
:
5688 gen_helper_eq_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5690 case OPC2_32_RR_EQ_W
:
5691 tcg_gen_negsetcond_tl(TCG_COND_EQ
, cpu_gpr_d
[r3
],
5692 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5694 case OPC2_32_RR_EQANY_B
:
5695 gen_helper_eqany_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5697 case OPC2_32_RR_EQANY_H
:
5698 gen_helper_eqany_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5701 tcg_gen_setcond_tl(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5704 case OPC2_32_RR_GE_U
:
5705 tcg_gen_setcond_tl(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5709 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5712 case OPC2_32_RR_LT_U
:
5713 tcg_gen_setcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5716 case OPC2_32_RR_LT_B
:
5717 gen_helper_lt_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5719 case OPC2_32_RR_LT_BU
:
5720 gen_helper_lt_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5722 case OPC2_32_RR_LT_H
:
5723 gen_helper_lt_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5725 case OPC2_32_RR_LT_HU
:
5726 gen_helper_lt_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5728 case OPC2_32_RR_LT_W
:
5729 tcg_gen_negsetcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
],
5730 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5732 case OPC2_32_RR_LT_WU
:
5733 tcg_gen_negsetcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
],
5734 cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5736 case OPC2_32_RR_MAX
:
5737 tcg_gen_movcond_tl(TCG_COND_GT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5738 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5740 case OPC2_32_RR_MAX_U
:
5741 tcg_gen_movcond_tl(TCG_COND_GTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5742 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5744 case OPC2_32_RR_MAX_B
:
5745 gen_helper_max_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5747 case OPC2_32_RR_MAX_BU
:
5748 gen_helper_max_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5750 case OPC2_32_RR_MAX_H
:
5751 gen_helper_max_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5753 case OPC2_32_RR_MAX_HU
:
5754 gen_helper_max_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5756 case OPC2_32_RR_MIN
:
5757 tcg_gen_movcond_tl(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5758 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5760 case OPC2_32_RR_MIN_U
:
5761 tcg_gen_movcond_tl(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5762 cpu_gpr_d
[r2
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5764 case OPC2_32_RR_MIN_B
:
5765 gen_helper_min_b(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5767 case OPC2_32_RR_MIN_BU
:
5768 gen_helper_min_bu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5770 case OPC2_32_RR_MIN_H
:
5771 gen_helper_min_h(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5773 case OPC2_32_RR_MIN_HU
:
5774 gen_helper_min_hu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5776 case OPC2_32_RR_MOV
:
5777 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5779 case OPC2_32_RR_MOV_64
:
5780 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5781 temp
= tcg_temp_new();
5784 tcg_gen_mov_tl(temp
, cpu_gpr_d
[r1
]);
5785 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5786 tcg_gen_mov_tl(cpu_gpr_d
[r3
+ 1], temp
);
5788 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5791 case OPC2_32_RR_MOVS_64
:
5792 if (has_feature(ctx
, TRICORE_FEATURE_16
)) {
5794 tcg_gen_mov_tl(cpu_gpr_d
[r3
], cpu_gpr_d
[r2
]);
5795 tcg_gen_sari_tl(cpu_gpr_d
[r3
+ 1], cpu_gpr_d
[r2
], 31);
5797 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
5801 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5804 case OPC2_32_RR_OR_EQ
:
5805 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5806 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5808 case OPC2_32_RR_OR_GE
:
5809 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5810 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5812 case OPC2_32_RR_OR_GE_U
:
5813 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5814 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5816 case OPC2_32_RR_OR_LT
:
5817 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5818 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5820 case OPC2_32_RR_OR_LT_U
:
5821 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5822 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5824 case OPC2_32_RR_OR_NE
:
5825 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5826 cpu_gpr_d
[r2
], &tcg_gen_or_tl
);
5828 case OPC2_32_RR_SAT_B
:
5829 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7f, -0x80);
5831 case OPC2_32_RR_SAT_BU
:
5832 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xff);
5834 case OPC2_32_RR_SAT_H
:
5835 gen_saturate(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0x7fff, -0x8000);
5837 case OPC2_32_RR_SAT_HU
:
5838 gen_saturate_u(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], 0xffff);
5840 case OPC2_32_RR_SH_EQ
:
5841 gen_sh_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5844 case OPC2_32_RR_SH_GE
:
5845 gen_sh_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5848 case OPC2_32_RR_SH_GE_U
:
5849 gen_sh_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5852 case OPC2_32_RR_SH_LT
:
5853 gen_sh_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5856 case OPC2_32_RR_SH_LT_U
:
5857 gen_sh_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5860 case OPC2_32_RR_SH_NE
:
5861 gen_sh_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5864 case OPC2_32_RR_SUB
:
5865 gen_sub_d(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5867 case OPC2_32_RR_SUB_B
:
5868 gen_helper_sub_b(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5870 case OPC2_32_RR_SUB_H
:
5871 gen_helper_sub_h(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5873 case OPC2_32_RR_SUBC
:
5874 gen_subc_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5876 case OPC2_32_RR_SUBS
:
5877 gen_subs(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5879 case OPC2_32_RR_SUBS_U
:
5880 gen_subsu(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5882 case OPC2_32_RR_SUBS_H
:
5883 gen_helper_sub_h_ssov(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
],
5886 case OPC2_32_RR_SUBS_HU
:
5887 gen_helper_sub_h_suov(cpu_gpr_d
[r3
], tcg_env
, cpu_gpr_d
[r1
],
5890 case OPC2_32_RR_SUBX
:
5891 gen_sub_CC(cpu_gpr_d
[r3
], cpu_gpr_d
[r1
], cpu_gpr_d
[r2
]);
5893 case OPC2_32_RR_XOR_EQ
:
5894 gen_accumulating_cond(TCG_COND_EQ
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5895 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5897 case OPC2_32_RR_XOR_GE
:
5898 gen_accumulating_cond(TCG_COND_GE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5899 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5901 case OPC2_32_RR_XOR_GE_U
:
5902 gen_accumulating_cond(TCG_COND_GEU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5903 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5905 case OPC2_32_RR_XOR_LT
:
5906 gen_accumulating_cond(TCG_COND_LT
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5907 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5909 case OPC2_32_RR_XOR_LT_U
:
5910 gen_accumulating_cond(TCG_COND_LTU
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5911 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5913 case OPC2_32_RR_XOR_NE
:
5914 gen_accumulating_cond(TCG_COND_NE
, cpu_gpr_d
[r3
], cpu_gpr_d
[r1
],
5915 cpu_gpr_d
[r2
], &tcg_gen_xor_tl
);
5918 generate_trap(ctx
, TRAPC_INSN_ERR
, TIN2_IOPC
);
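/*
 * CLO/CLS/CLZ count leading ones, redundant sign bits and zeros of D[r1];
 * they map onto tcg_gen_clzi_tl() (with the operand inverted for CLO)
 * and tcg_gen_clrsb_tl(), while the halfword variants go through helpers.
 */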
static void decode_rr_logical_shift(DisasContext *ctx)
{
    uint32_t op2;
    int r3, r2, r1;

    r3 = MASK_OP_RR_D(ctx->opcode);
    r2 = MASK_OP_RR_S2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);
    op2 = MASK_OP_RR_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_AND:
        tcg_gen_and_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ANDN:
        tcg_gen_andc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_CLO:
        tcg_gen_not_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], TARGET_LONG_BITS);
        break;
    case OPC2_32_RR_CLO_H:
        gen_helper_clo_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLS:
        tcg_gen_clrsb_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLS_H:
        gen_helper_cls_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CLZ:
        tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], TARGET_LONG_BITS);
        break;
    case OPC2_32_RR_CLZ_H:
        gen_helper_clz_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_NAND:
        tcg_gen_nand_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_NOR:
        tcg_gen_nor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_OR:
        tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_ORN:
        tcg_gen_orc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH:
        gen_helper_sh(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SH_H:
        gen_helper_sh_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SHA:
        gen_helper_sha(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SHA_H:
        gen_helper_sha_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_SHAS:
        gen_shas(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_XNOR:
        tcg_gen_eqv_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_XOR:
        tcg_gen_xor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rr_address(DisasContext *ctx)
{
    uint32_t op2, n;
    int r1, r2, r3;
    TCGv temp;

    op2 = MASK_OP_RR_OP2(ctx->opcode);
    r3 = MASK_OP_RR_D(ctx->opcode);
    r2 = MASK_OP_RR_S2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);
    n = MASK_OP_RR_N(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_ADD_A:
        tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_ADDSC_A:
        temp = tcg_temp_new();
        tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n);
        tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp);
        break;
    case OPC2_32_RR_ADDSC_AT:
        temp = tcg_temp_new();
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3);
        tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp);
        tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC);
        break;
    case OPC2_32_RR_EQ_A:
        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_EQZ:
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
        break;
    case OPC2_32_RR_GE_A:
        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_LT_A:
        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_MOV_A:
        tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_MOV_AA:
        tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_MOV_D:
        tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_NE_A:
        tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1],
                           cpu_gpr_a[r2]);
        break;
    case OPC2_32_RR_NEZ_A:
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
        break;
    case OPC2_32_RR_SUB_A:
        tcg_gen_sub_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rr_idirect(DisasContext *ctx)
{
    uint32_t op2;
    int r1;

    op2 = MASK_OP_RR_OP2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_JI:
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
        break;
    case OPC2_32_RR_JLI:
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
        tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
        break;
    case OPC2_32_RR_CALLI:
        gen_helper_1arg(call, ctx->pc_succ_insn);
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
        break;
    case OPC2_32_RR_FCALLI:
        gen_fcall_save_ctx(ctx);
        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
    ctx->base.is_jmp = DISAS_JUMP;
}
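
/*
 * Every RR indirect branch above writes its halfword-aligned target into
 * cpu_PC (bit 0 is masked off), so the decoder unconditionally finishes
 * the translation block with is_jmp = DISAS_JUMP; CALLI and FCALLI save
 * the return context first.
 */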
static void decode_rr_divide(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;

    TCGv temp, temp2, temp3;

    op2 = MASK_OP_RR_OP2(ctx->opcode);
    r3 = MASK_OP_RR_D(ctx->opcode);
    r2 = MASK_OP_RR_S2(ctx->opcode);
    r1 = MASK_OP_RR_S1(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR_BMERGE:
        gen_helper_bmerge(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_BSPLIT:
        gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_DVINIT_B:
        gen_dvinit_b(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_DVINIT_BU:
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        temp3 = tcg_temp_new();

        tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 8);
        /* reset AV bit */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        if (!has_feature(ctx, TRICORE_FEATURE_131)) {
            /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
            tcg_gen_abs_tl(temp, temp3);
            tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]);
            tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
        } else {
            /* overflow = (D[b] == 0) */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sticky overflow */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* write result */
        tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24);
        tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
        break;
    case OPC2_32_RR_DVINIT_H:
        gen_dvinit_h(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_DVINIT_HU:
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();
        temp3 = tcg_temp_new();

        tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 16);
        /* reset AV bit */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        if (!has_feature(ctx, TRICORE_FEATURE_131)) {
            /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
            tcg_gen_abs_tl(temp, temp3);
            tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]);
            tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
        } else {
            /* overflow = (D[b] == 0) */
            tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
        }
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sticky overflow */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* write result */
        tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16);
        tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
        break;
    case OPC2_32_RR_DVINIT:
        temp = tcg_temp_new();
        temp2 = tcg_temp_new();

        /* overflow = ((D[b] == 0) ||
                      ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff);
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000);
        tcg_gen_and_tl(temp, temp, temp2);
        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0);
        tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sticky overflow */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* reset AV bit */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        /* write result */
        tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        /* sign extend to high reg */
        tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31);
        break;
    case OPC2_32_RR_DVINIT_U:
        /* overflow = (D[b] == 0) */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
        /* sticky overflow */
        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
        /* reset AV bit */
        tcg_gen_movi_tl(cpu_PSW_AV, 0);
        /* write result */
        tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        /* zero extend to high reg*/
        tcg_gen_movi_tl(cpu_gpr_d[r3+1], 0);
        break;
    case OPC2_32_RR_PARITY:
        gen_helper_parity(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_UNPACK:
        gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_CRC32_B:
        if (has_feature(ctx, TRICORE_FEATURE_162)) {
            gen_helper_crc32b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_RR_CRC32: /* CRC32B.W in 1.6.2 */
        if (has_feature(ctx, TRICORE_FEATURE_161)) {
            gen_helper_crc32_be(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_RR_CRC32L_W:
        if (has_feature(ctx, TRICORE_FEATURE_162)) {
            gen_helper_crc32_le(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_RR_POPCNT_W:
        if (has_feature(ctx, TRICORE_FEATURE_162)) {
            tcg_gen_ctpop_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_RR_DIV:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                          cpu_gpr_d[r2]);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_RR_DIV_U:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3+1],
                          cpu_gpr_d[r1], cpu_gpr_d[r2]);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_RR_MUL_F:
        gen_helper_fmul(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_DIV_F:
        gen_helper_fdiv(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_FTOHP:
        if (has_feature(ctx, TRICORE_FEATURE_162)) {
            gen_helper_ftohp(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_RR_HPTOF:
        if (has_feature(ctx, TRICORE_FEATURE_162)) {
            gen_helper_hptof(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_RR_CMP_F:
        gen_helper_fcmp(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR_FTOI:
        gen_helper_ftoi(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_ITOF:
        gen_helper_itof(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_FTOU:
        gen_helper_ftou(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_FTOUZ:
        if (has_feature(ctx, TRICORE_FEATURE_131)) {
            gen_helper_ftouz(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_RR_UPDFL:
        gen_helper_updfl(tcg_env, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_UTOF:
        gen_helper_utof(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_FTOIZ:
        gen_helper_ftoiz(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
        break;
    case OPC2_32_RR_QSEED_F:
        gen_helper_qseed(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
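
/*
 * The DVINIT variants above only prepare the dividend/divisor register
 * pair and the PSW bits; the division itself is performed by the DVSTEP
 * and DVADJ instructions decoded elsewhere. The ISA-version split is
 * visible in the code: before TriCore 1.3.1 overflow is
 * abs(D[r3+1]) >= abs(D[r2]), from 1.3.1 on it is simply "divisor == 0".
 */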
static void decode_rr1_mul(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    TCGv n;
    TCGv_i64 temp64;

    r1 = MASK_OP_RR1_S1(ctx->opcode);
    r2 = MASK_OP_RR1_S2(ctx->opcode);
    r3 = MASK_OP_RR1_D(ctx->opcode);
    n = tcg_constant_i32(MASK_OP_RR1_N(ctx->opcode));
    op2 = MASK_OP_RR1_OP2(ctx->opcode);

    switch (op2) {
    case OPC2_32_RR1_MUL_H_32_LL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        break;
    case OPC2_32_RR1_MUL_H_32_LU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        break;
    case OPC2_32_RR1_MUL_H_32_UL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        break;
    case OPC2_32_RR1_MUL_H_32_UU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
        break;
    case OPC2_32_RR1_MULM_H_64_LL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        break;
    case OPC2_32_RR1_MULM_H_64_LU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_LU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        break;
    case OPC2_32_RR1_MULM_H_64_UL:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        break;
    case OPC2_32_RR1_MULM_H_64_UU:
        temp64 = tcg_temp_new_i64();
        GEN_HELPER_UU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
        /* reset V bit */
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        /* reset AV bit */
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        break;
    case OPC2_32_RR1_MULR_H_16_LL:
        GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    case OPC2_32_RR1_MULR_H_16_LU:
        GEN_HELPER_LU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    case OPC2_32_RR1_MULR_H_16_UL:
        GEN_HELPER_UL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    case OPC2_32_RR1_MULR_H_16_UU:
        GEN_HELPER_UU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
        gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
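
/*
 * The 64-bit MUL.H/MULM.H results above come back from the helpers as a
 * single i64 and are split into the even/odd destination register pair
 * with tcg_gen_extr_i64_i32(); for MULM.H the decoder just clears PSW.V
 * and PSW.AV instead of calling a status helper.
 */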
static void decode_rr1_mulq(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    uint32_t n;

    TCGv temp, temp2;

    r1 = MASK_OP_RR1_S1(ctx->opcode);
    r2 = MASK_OP_RR1_S2(ctx->opcode);
    r3 = MASK_OP_RR1_D(ctx->opcode);
    n = MASK_OP_RR1_N(ctx->opcode);
    op2 = MASK_OP_RR1_OP2(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RR1_MUL_Q_32:
        gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2], n, 32);
        break;
    case OPC2_32_RR1_MUL_Q_64:
        gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                  n, 0);
        break;
    case OPC2_32_RR1_MUL_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
        break;
    case OPC2_32_RR1_MUL_Q_64_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
        break;
    case OPC2_32_RR1_MUL_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
        break;
    case OPC2_32_RR1_MUL_Q_64_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
        break;
    case OPC2_32_RR1_MUL_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RR1_MUL_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RR1_MULR_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RR1_MULR_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
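
/*
 * In the MUL.Q group above the _L/_U (and _LL/_UU) suffixes only select
 * which halfword of the source registers feeds the multiplier: the low
 * half via tcg_gen_ext16s_tl(), the high half via an arithmetic shift
 * right by 16. Both produce a sign-extended 16-bit operand for
 * gen_mul_q()/gen_mul_q_16()/gen_mulr_q().
 */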
static void decode_rr2_mul(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;

    op2 = MASK_OP_RR2_OP2(ctx->opcode);
    r1 = MASK_OP_RR2_S1(ctx->opcode);
    r2 = MASK_OP_RR2_S2(ctx->opcode);
    r3 = MASK_OP_RR2_D(ctx->opcode);
    switch (op2) {
    case OPC2_32_RR2_MUL_32:
        gen_mul_i32s(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MUL_64:
        gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MULS_32:
        gen_helper_mul_ssov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
                            cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MUL_U_64:
        gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RR2_MULS_U_32:
        gen_helper_mul_suov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
                            cpu_gpr_d[r2]);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rrpw_extract_insert(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3;
    int32_t pos, width;
    TCGv temp;

    op2 = MASK_OP_RRPW_OP2(ctx->opcode);
    r1 = MASK_OP_RRPW_S1(ctx->opcode);
    r2 = MASK_OP_RRPW_S2(ctx->opcode);
    r3 = MASK_OP_RRPW_D(ctx->opcode);
    pos = MASK_OP_RRPW_POS(ctx->opcode);
    width = MASK_OP_RRPW_WIDTH(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRPW_EXTR:
        if (width == 0) {
            tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
            break;
        }

        if (pos + width <= 32) {
            /* optimize special cases */
            if ((pos == 0) && (width == 8)) {
                tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
            } else if ((pos == 0) && (width == 16)) {
                tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
            } else {
                tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 32 - pos - width);
                tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width);
            }
        }
        break;
    case OPC2_32_RRPW_EXTR_U:
        if (width == 0) {
            tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
        } else {
            tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos);
            tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32-width));
        }
        break;
    case OPC2_32_RRPW_IMASK:
        if (pos + width <= 32) {
            temp = tcg_temp_new();
            tcg_gen_movi_tl(temp, ((1u << width) - 1) << pos);
            tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
            tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp);
        }
        break;
    case OPC2_32_RRPW_INSERT:
        /* tcg_gen_deposit_tl() does not handle the case of width = 0 */
        if (width == 0) {
            tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
        } else if (pos + width <= 32) {
            tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                               pos, width);
        }
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
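
/*
 * EXTR/EXTR.U/INSERT above special-case width == 0 because the shift-based
 * extraction and tcg_gen_deposit_tl() cannot express a zero-length field:
 * EXTR(.U) then yields 0 and INSERT just copies D[r1]. Fields with
 * pos + width > 32 generate no code at all, which is presumably acceptable
 * since such encodings are not architecturally defined.
 */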
static void decode_rrr_cond_select(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    TCGv temp;

    op2 = MASK_OP_RRR_OP2(ctx->opcode);
    r1 = MASK_OP_RRR_S1(ctx->opcode);
    r2 = MASK_OP_RRR_S2(ctx->opcode);
    r3 = MASK_OP_RRR_S3(ctx->opcode);
    r4 = MASK_OP_RRR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR_CADD:
        gen_cond_add(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
                     cpu_gpr_d[r4], cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CADDN:
        gen_cond_add(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CSUB:
        gen_cond_sub(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_CSUBN:
        gen_cond_sub(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
                     cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_SEL:
        temp = tcg_constant_i32(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_SELN:
        temp = tcg_constant_i32(0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
                           cpu_gpr_d[r1], cpu_gpr_d[r2]);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
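
/*
 * SEL/SELN map directly onto tcg_gen_movcond_tl() against a constant zero:
 * D[r4] = (D[r3] != 0) ? D[r1] : D[r2] for SEL and the inverted condition
 * for SELN. CADD/CADDN/CSUB/CSUBN reuse the shared gen_cond_add/gen_cond_sub
 * helpers, likewise with the condition taken from D[r3].
 */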
static void decode_rrr_divide(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;

    op2 = MASK_OP_RRR_OP2(ctx->opcode);
    r1 = MASK_OP_RRR_S1(ctx->opcode);
    r2 = MASK_OP_RRR_S2(ctx->opcode);
    r3 = MASK_OP_RRR_S3(ctx->opcode);
    r4 = MASK_OP_RRR_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR_DVADJ:
        GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_DVSTEP:
        GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_DVSTEP_U:
        GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMAX:
        GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMAX_U:
        GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMIN:
        GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_IXMIN_U:
        GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR_PACK:
        gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3],
                        cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
        break;
    case OPC2_32_RRR_CRCN:
        if (has_feature(ctx, TRICORE_FEATURE_162)) {
            gen_helper_crcn(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2],
                            cpu_gpr_d[r3]);
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_RRR_ADD_F:
        gen_helper_fadd(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_SUB_F:
        gen_helper_fsub(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_MADD_F:
        gen_helper_fmadd(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
                         cpu_gpr_d[r2], cpu_gpr_d[r3]);
        break;
    case OPC2_32_RRR_MSUB_F:
        gen_helper_fmsub(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
                         cpu_gpr_d[r2], cpu_gpr_d[r3]);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rrr2_madd(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4;

    op2 = MASK_OP_RRR2_OP2(ctx->opcode);
    r1 = MASK_OP_RRR2_S1(ctx->opcode);
    r2 = MASK_OP_RRR2_S2(ctx->opcode);
    r3 = MASK_OP_RRR2_S3(ctx->opcode);
    r4 = MASK_OP_RRR2_D(ctx->opcode);
    switch (op2) {
    case OPC2_32_RRR2_MADD_32:
        gen_madd32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADD_64:
        gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_32:
        gen_helper_madd32_ssov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_64:
        gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADD_U_64:
        gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_U_32:
        gen_helper_madd32_suov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MADDS_U_64:
        gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rrr2_msub(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4;

    op2 = MASK_OP_RRR2_OP2(ctx->opcode);
    r1 = MASK_OP_RRR2_S1(ctx->opcode);
    r2 = MASK_OP_RRR2_S2(ctx->opcode);
    r3 = MASK_OP_RRR2_S3(ctx->opcode);
    r4 = MASK_OP_RRR2_D(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR2_MSUB_32:
        gen_msub32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
                     cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUB_64:
        gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_32:
        gen_helper_msub32_ssov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_64:
        gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                     cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUB_U_64:
        gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_U_32:
        gen_helper_msub32_suov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
                               cpu_gpr_d[r3], cpu_gpr_d[r2]);
        break;
    case OPC2_32_RRR2_MSUBS_U_64:
        gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
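
/*
 * The two RRR2 decoders above follow one pattern: plain 32-bit
 * multiply-accumulate, a 64-bit accumulate into an even/odd register pair,
 * and saturating forms where the 32-bit ssov/suov work is done inside the
 * madd32/msub32 helpers so that PSW.V/SV come out of the helper rather
 * than from inline TCG code.
 */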
static void decode_rrr1_madd(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;

    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 = MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR1_MADD_H_LL:
        gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADD_H_LU:
        gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADD_H_UL:
        gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADD_H_UU:
        gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDS_H_LL:
        gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDS_H_LU:
        gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDS_H_UL:
        gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDS_H_UU:
        gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDM_H_LL:
        gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDM_H_LU:
        gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDM_H_UL:
        gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDM_H_UU:
        gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDMS_H_LL:
        gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDMS_H_LU:
        gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDMS_H_UL:
        gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDMS_H_UU:
        gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDR_H_LL:
        gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDR_H_LU:
        gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDR_H_UL:
        gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDR_H_UU:
        gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDRS_H_LL:
        gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDRS_H_LU:
        gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDRS_H_UL:
        gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDRS_H_UU:
        gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_UU);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rrr1_maddq_h(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;
    TCGv temp, temp2;

    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 = MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RRR1_MADD_Q_32:
        gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                     cpu_gpr_d[r2], n, 32);
        break;
    case OPC2_32_RRR1_MADD_Q_64:
        gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                     n);
        break;
    case OPC2_32_RRR1_MADD_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                     temp, n, 16);
        break;
    case OPC2_32_RRR1_MADD_Q_64_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                     n);
        break;
    case OPC2_32_RRR1_MADD_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                     temp, n, 16);
        break;
    case OPC2_32_RRR1_MADD_Q_64_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                     n);
        break;
    case OPC2_32_RRR1_MADD_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADD_Q_64_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADD_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADD_Q_64_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDS_Q_32:
        gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, 32);
        break;
    case OPC2_32_RRR1_MADDS_Q_64:
        gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n);
        break;
    case OPC2_32_RRR1_MADDS_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      temp, n, 16);
        break;
    case OPC2_32_RRR1_MADDS_Q_64_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                      n);
        break;
    case OPC2_32_RRR1_MADDS_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      temp, n, 16);
        break;
    case OPC2_32_RRR1_MADDS_Q_64_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                      n);
        break;
    case OPC2_32_RRR1_MADDS_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDS_Q_64_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                        cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDS_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDS_Q_64_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                        cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDR_H_64_UL:
        gen_maddr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
                      cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
        break;
    case OPC2_32_RRR1_MADDRS_H_64_UL:
        gen_maddr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
                       cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
        break;
    case OPC2_32_RRR1_MADDR_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDR_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDRS_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDRS_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rrr1_maddsu_h(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;

    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 = MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR1_MADDSU_H_32_LL:
        gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSU_H_32_LU:
        gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSU_H_32_UL:
        gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSU_H_32_UU:
        gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDSUS_H_32_LL:
        gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSUS_H_32_LU:
        gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSUS_H_32_UL:
        gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSUS_H_32_UU:
        gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDSUM_H_64_LL:
        gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSUM_H_64_LU:
        gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSUM_H_64_UL:
        gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSUM_H_64_UU:
        gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDSUMS_H_64_LL:
        gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                       n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSUMS_H_64_LU:
        gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                       n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSUMS_H_64_UL:
        gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                       n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSUMS_H_64_UU:
        gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                       n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDSUR_H_16_LL:
        gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                        cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSUR_H_16_LU:
        gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                        cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSUR_H_16_UL:
        gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                        cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSUR_H_16_UU:
        gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                        cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDSURS_H_16_LL:
        gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                         cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSURS_H_16_LU:
        gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                         cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSURS_H_16_UL:
        gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                         cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSURS_H_16_UU:
        gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                         cpu_gpr_d[r2], n, MODE_UU);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rrr1_msub(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;

    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 = MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR1_MSUB_H_LL:
        gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUB_H_LU:
        gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUB_H_UL:
        gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUB_H_UU:
        gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBS_H_LL:
        gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBS_H_LU:
        gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBS_H_UL:
        gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBS_H_UU:
        gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBM_H_LL:
        gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBM_H_LU:
        gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBM_H_UL:
        gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBM_H_UU:
        gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBMS_H_LL:
        gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBMS_H_LU:
        gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBMS_H_UL:
        gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBMS_H_UU:
        gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBR_H_LL:
        gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBR_H_LU:
        gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBR_H_UL:
        gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBR_H_UU:
        gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBRS_H_LL:
        gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBRS_H_LU:
        gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBRS_H_UL:
        gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBRS_H_UU:
        gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                       cpu_gpr_d[r2], n, MODE_UU);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rrr1_msubq_h(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;
    TCGv temp, temp2;

    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 = MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    temp = tcg_temp_new();
    temp2 = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RRR1_MSUB_Q_32:
        gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                     cpu_gpr_d[r2], n, 32);
        break;
    case OPC2_32_RRR1_MSUB_Q_64:
        gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                     n);
        break;
    case OPC2_32_RRR1_MSUB_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                     temp, n, 16);
        break;
    case OPC2_32_RRR1_MSUB_Q_64_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                     n);
        break;
    case OPC2_32_RRR1_MSUB_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                     temp, n, 16);
        break;
    case OPC2_32_RRR1_MSUB_Q_64_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                     n);
        break;
    case OPC2_32_RRR1_MSUB_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUB_Q_64_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUB_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUB_Q_64_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_32:
        gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      cpu_gpr_d[r2], n, 32);
        break;
    case OPC2_32_RRR1_MSUBS_Q_64:
        gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_32_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      temp, n, 16);
        break;
    case OPC2_32_RRR1_MSUBS_Q_64_L:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
        gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                      n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_32_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                      temp, n, 16);
        break;
    case OPC2_32_RRR1_MSUBS_Q_64_U:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
        gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
                      n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_64_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                        cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_64_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                        cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBR_H_64_UL:
        gen_msubr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
                      cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
        break;
    case OPC2_32_RRR1_MSUBRS_H_64_UL:
        gen_msubr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
                       cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
        break;
    case OPC2_32_RRR1_MSUBR_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBR_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBRS_Q_32_LL:
        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
        gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBRS_Q_32_UU:
        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
        gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rrr1_msubad_h(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;

    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 = MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR1_MSUBAD_H_32_LL:
        gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBAD_H_32_LU:
        gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBAD_H_32_UL:
        gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBAD_H_32_UU:
        gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBADS_H_32_LL:
        gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBADS_H_32_LU:
        gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBADS_H_32_UL:
        gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBADS_H_32_UU:
        gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBADM_H_64_LL:
        gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBADM_H_64_LU:
        gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBADM_H_64_UL:
        gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBADM_H_64_UU:
        gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                      n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBADMS_H_64_LL:
        gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                       n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBADMS_H_64_LU:
        gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                       n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBADMS_H_64_UL:
        gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                       n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBADMS_H_64_UU:
        gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
                       n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBADR_H_16_LL:
        gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                        cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBADR_H_16_LU:
        gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                        cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBADR_H_16_UL:
        gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                        cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBADR_H_16_UU:
        gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                        cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBADRS_H_16_LL:
        gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                         cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBADRS_H_16_LU:
        gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                         cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBADRS_H_16_UL:
        gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                         cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBADRS_H_16_UU:
        gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
                         cpu_gpr_d[r2], n, MODE_UU);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
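
/*
 * Across the RRR1 packed decoders above, the _LL/_LU/_UL/_UU suffix only
 * selects which halfwords of D[r1] and D[r2] are paired; it is passed down
 * either as a MODE_* constant or as pre-extracted 16-bit temporaries. The
 * accumulate/saturate/round behaviour lives entirely in the gen_* helper
 * that each case calls.
 */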
static void decode_rrrr_extract_insert(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    TCGv tmp_width, tmp_pos;

    r1 = MASK_OP_RRRR_S1(ctx->opcode);
    r2 = MASK_OP_RRRR_S2(ctx->opcode);
    r3 = MASK_OP_RRRR_S3(ctx->opcode);
    r4 = MASK_OP_RRRR_D(ctx->opcode);
    op2 = MASK_OP_RRRR_OP2(ctx->opcode);

    tmp_pos = tcg_temp_new();
    tmp_width = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RRRR_DEXTR:
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        if (r1 == r2) {
            tcg_gen_rotl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
        } else {
            TCGv msw = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            tcg_gen_shl_tl(tmp_width, cpu_gpr_d[r1], tmp_pos);
            tcg_gen_subfi_tl(msw, 32, tmp_pos);
            tcg_gen_shr_tl(msw, cpu_gpr_d[r2], msw);
            /*
             * if pos == 0, then we do cpu_gpr_d[r2] << 32, which is undefined
             * behaviour. So check that case here and set the low bits to zero
             * which effectivly returns cpu_gpr_d[r1]
             */
            tcg_gen_movcond_tl(TCG_COND_EQ, msw, tmp_pos, zero, zero, msw);
            tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, msw);
        }
        break;
    case OPC2_32_RRRR_EXTR:
    case OPC2_32_RRRR_EXTR_U:
        tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        tcg_gen_add_tl(tmp_pos, tmp_pos, tmp_width);
        tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
        tcg_gen_subfi_tl(tmp_width, 32, tmp_width);
        if (op2 == OPC2_32_RRRR_EXTR) {
            tcg_gen_sar_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
        } else {
            tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
        }
        break;
    case OPC2_32_RRRR_INSERT:
        tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
        gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], tmp_width,
                   tmp_pos);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
static void decode_rrrw_extract_insert(DisasContext *ctx)
{
    uint32_t op2;
    int r1, r2, r3, r4;
    int32_t width;

    TCGv temp, temp2;

    op2 = MASK_OP_RRRW_OP2(ctx->opcode);
    r1 = MASK_OP_RRRW_S1(ctx->opcode);
    r2 = MASK_OP_RRRW_S2(ctx->opcode);
    r3 = MASK_OP_RRRW_S3(ctx->opcode);
    r4 = MASK_OP_RRRW_D(ctx->opcode);
    width = MASK_OP_RRRW_WIDTH(ctx->opcode);

    temp = tcg_temp_new();

    switch (op2) {
    case OPC2_32_RRRW_EXTR:
        tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
        tcg_gen_addi_tl(temp, temp, width);
        tcg_gen_subfi_tl(temp, 32, temp);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
        tcg_gen_sari_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width);
        break;
    case OPC2_32_RRRW_EXTR_U:
        if (width == 0) {
            tcg_gen_movi_tl(cpu_gpr_d[r4], 0);
        } else {
            tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
            tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
            tcg_gen_andi_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32-width));
        }
        break;
    case OPC2_32_RRRW_IMASK:
        temp2 = tcg_temp_new();

        tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
        tcg_gen_movi_tl(temp2, (1 << width) - 1);
        tcg_gen_shl_tl(temp2, temp2, temp);
        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp);
        tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2);
        break;
    case OPC2_32_RRRW_INSERT:
        temp2 = tcg_temp_new();

        tcg_gen_movi_tl(temp, width);
        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f);
        gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
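
/*
 * RRRW IMASK above can only fold the constant width into the mask
 * ((1 << width) - 1) at translation time; the position comes from D[r3]
 * at run time, so both the mask and the shifted D[r2] are computed with
 * TCG shifts and written to the destination register pair (shifted value
 * in the even register, mask in the odd one).
 */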
static void decode_sys_interrupts(DisasContext *ctx)
{
    uint32_t op2;
    uint32_t r1;
    TCGLabel *l1;
    TCGv tmp;

    op2 = MASK_OP_SYS_OP2(ctx->opcode);
    r1 = MASK_OP_SYS_S1D(ctx->opcode);

    switch (op2) {
    case OPC2_32_SYS_DEBUG:
        /* raise EXCP_DEBUG */
        break;
    case OPC2_32_SYS_DISABLE:
        if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
            tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
        } else {
            generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
        }
        break;
    case OPC2_32_SYS_DISABLE_D:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
                tcg_gen_extract_tl(cpu_gpr_d[r1], cpu_ICR,
                                   ctx->icr_ie_offset, 1);
                tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask);
            } else {
                generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
            }
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_SYS_DSYNC:
        break;
    case OPC2_32_SYS_ENABLE:
        if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
            tcg_gen_ori_tl(cpu_ICR, cpu_ICR, ctx->icr_ie_mask);
            ctx->base.is_jmp = DISAS_EXIT_UPDATE;
        } else {
            generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
        }
        break;
    case OPC2_32_SYS_ISYNC:
        break;
    case OPC2_32_SYS_NOP:
        break;
    case OPC2_32_SYS_RET:
        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
        break;
    case OPC2_32_SYS_FRET:
        gen_fret(ctx);
        break;
    case OPC2_32_SYS_RFE:
        gen_helper_rfe(tcg_env);
        ctx->base.is_jmp = DISAS_EXIT;
        break;
    case OPC2_32_SYS_RFM:
        if (ctx->priv == TRICORE_PRIV_SM) {
            tmp = tcg_temp_new();
            l1 = gen_new_label();

            tcg_gen_ld32u_tl(tmp, tcg_env, offsetof(CPUTriCoreState, DBGSR));
            tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
            tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
            gen_helper_rfm(tcg_env);
            gen_set_label(l1);
            ctx->base.is_jmp = DISAS_EXIT;
        } else {
            generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
        }
        break;
    case OPC2_32_SYS_RSLCX:
        gen_helper_rslcx(tcg_env);
        break;
    case OPC2_32_SYS_SVLCX:
        gen_helper_svlcx(tcg_env);
        break;
    case OPC2_32_SYS_RESTORE:
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) {
                tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1],
                                   ctx->icr_ie_offset, 1);
                ctx->base.is_jmp = DISAS_EXIT_UPDATE;
            } else {
                generate_trap(ctx, TRAPC_PROT, TIN1_PRIV);
            }
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_SYS_TRAPSV:
        l1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_SV, 0, l1);
        generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF);
        gen_set_label(l1);
        break;
    case OPC2_32_SYS_TRAPV:
        l1 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_V, 0, l1);
        generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF);
        gen_set_label(l1);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
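/*
 * Top-level decoder for 32-bit encodings: dispatch on the major opcode
 * field and hand off to the per-format decode functions; a few simple
 * formats (ABS ST.Q/LD.Q, ABSB ST.T, B, BRC/BRN/BRR branches, RCRR and
 * RRPW insert/extract) are handled inline.
 */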
static void decode_32Bit_opc(DisasContext *ctx)
{
    int r1, r2, r3;
    uint32_t op1, op2;
    int32_t address, const16;
    int8_t b, const4;
    int32_t bpos;
    TCGv temp, temp2, temp3;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle JNZ.T opcode only being 7 bit long */
    if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
        op1 = OPCM_32_BRN_JTT;
    }

    switch (op1) {
    case OPCM_32_ABS_LDW:
        decode_abs_ldw(ctx);
        break;
    case OPCM_32_ABS_LDB:
        decode_abs_ldb(ctx);
        break;
    case OPCM_32_ABS_LDMST_SWAP:
        decode_abs_ldst_swap(ctx);
        break;
    case OPCM_32_ABS_LDST_CONTEXT:
        decode_abs_ldst_context(ctx);
        break;
    case OPCM_32_ABS_STORE:
        decode_abs_store(ctx);
        break;
    case OPCM_32_ABS_STOREB_H:
        decode_abs_storeb_h(ctx);
        break;
    case OPC1_32_ABS_STOREQ:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_constant_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);
        break;
    case OPC1_32_ABS_LD_Q:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_constant_i32(EA_ABS_FORMAT(address));

        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
        break;
    case OPCM_32_ABS_LEA_LHA:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);

        if (has_feature(ctx, TRICORE_FEATURE_162)) {
            op2 = MASK_OP_ABS_OP2(ctx->opcode);
            if (op2 == OPC2_32_ABS_LHA) {
                tcg_gen_movi_tl(cpu_gpr_a[r1], address << 14);
                break;
            }
            /* otherwise translate regular LEA */
        }

        tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
        break;
    case OPC1_32_ABSB_ST_T:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        b = MASK_OP_ABSB_B(ctx->opcode);
        bpos = MASK_OP_ABSB_BPOS(ctx->opcode);

        temp = tcg_constant_i32(EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new();

        tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
        tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
        tcg_gen_ori_tl(temp2, temp2, (b << bpos));
        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);
        break;
    case OPC1_32_B_CALL:
    case OPC1_32_B_CALLA:
    case OPC1_32_B_FCALL:
    case OPC1_32_B_FCALLA:
    case OPC1_32_B_J:
    case OPC1_32_B_JA:
    case OPC1_32_B_JL:
    case OPC1_32_B_JLA:
        address = MASK_OP_B_DISP24_SEXT(ctx->opcode);
        gen_compute_branch(ctx, op1, 0, 0, 0, address);
        break;
    case OPCM_32_BIT_ANDACC:
        decode_bit_andacc(ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T1:
        decode_bit_logical_t(ctx);
        break;
    case OPCM_32_BIT_INSERT:
        decode_bit_insert(ctx);
        break;
    case OPCM_32_BIT_LOGICAL_T2:
        decode_bit_logical_t2(ctx);
        break;
    case OPCM_32_BIT_ORAND:
        decode_bit_orand(ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC1:
        decode_bit_sh_logic1(ctx);
        break;
    case OPCM_32_BIT_SH_LOGIC2:
        decode_bit_sh_logic2(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
        decode_bo_addrmode_post_pre_base(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_bitreverse_circular(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
        decode_bo_addrmode_ld_post_pre_base(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ld_bitreverse_circular(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
        decode_bo_addrmode_stctx_post_pre_base(ctx);
        break;
    case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
        decode_bo_addrmode_ldmst_bitreverse_circular(ctx);
        break;
    case OPC1_32_BOL_LD_A_LONGOFF:
    case OPC1_32_BOL_LD_W_LONGOFF:
    case OPC1_32_BOL_LEA_LONGOFF:
    case OPC1_32_BOL_ST_W_LONGOFF:
    case OPC1_32_BOL_ST_A_LONGOFF:
    case OPC1_32_BOL_LD_B_LONGOFF:
    case OPC1_32_BOL_LD_BU_LONGOFF:
    case OPC1_32_BOL_LD_H_LONGOFF:
    case OPC1_32_BOL_LD_HU_LONGOFF:
    case OPC1_32_BOL_ST_B_LONGOFF:
    case OPC1_32_BOL_ST_H_LONGOFF:
        decode_bol_opc(ctx, op1);
        break;
    case OPCM_32_BRC_EQ_NEQ:
    case OPCM_32_BRC_GE:
    case OPCM_32_BRC_JLT:
    case OPCM_32_BRC_JNE:
        const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
        address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRC_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, const4, address);
        break;
    case OPCM_32_BRN_JTT:
        address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
        r1 = MASK_OP_BRN_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, 0, 0, address);
        break;
    case OPCM_32_BRR_EQ_NEQ:
    case OPCM_32_BRR_ADDR_EQ_NEQ:
    case OPCM_32_BRR_GE:
    case OPCM_32_BRR_JLT:
    case OPCM_32_BRR_JNE:
    case OPCM_32_BRR_JNZ:
    case OPCM_32_BRR_LOOP:
        address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
        r2 = MASK_OP_BRR_S2(ctx->opcode);
        r1 = MASK_OP_BRR_S1(ctx->opcode);
        gen_compute_branch(ctx, op1, r1, r2, 0, address);
        break;
    case OPCM_32_RC_LOGICAL_SHIFT:
        decode_rc_logical_shift(ctx);
        break;
    case OPCM_32_RC_ACCUMULATOR:
        decode_rc_accumulator(ctx);
        break;
    case OPCM_32_RC_SERVICEROUTINE:
        decode_rc_serviceroutine(ctx);
        break;
    case OPCM_32_RC_MUL:
        decode_rc_mul(ctx);
        break;
    case OPCM_32_RCPW_MASK_INSERT:
        decode_rcpw_insert(ctx);
        break;
    case OPC1_32_RCRR_INSERT:
        r1 = MASK_OP_RCRR_S1(ctx->opcode);
        r2 = MASK_OP_RCRR_S3(ctx->opcode);
        r3 = MASK_OP_RCRR_D(ctx->opcode);
        const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
        temp = tcg_constant_i32(const16);
        temp2 = tcg_temp_new(); /* width */
        temp3 = tcg_temp_new(); /* pos */

        tcg_gen_andi_tl(temp2, cpu_gpr_d[r2 + 1], 0x1f);
        tcg_gen_andi_tl(temp3, cpu_gpr_d[r2], 0x1f);

        gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, temp2, temp3);
        break;
    case OPCM_32_RCRW_MASK_INSERT:
        decode_rcrw_insert(ctx);
        break;
    case OPCM_32_RCR_COND_SELECT:
        decode_rcr_cond_select(ctx);
        break;
    case OPCM_32_RCR_MADD:
        decode_rcr_madd(ctx);
        break;
    case OPCM_32_RCR_MSUB:
        decode_rcr_msub(ctx);
        break;
    case OPC1_32_RLC_ADDI:
    case OPC1_32_RLC_ADDIH:
    case OPC1_32_RLC_ADDIH_A:
    case OPC1_32_RLC_MFCR:
    case OPC1_32_RLC_MOV:
    case OPC1_32_RLC_MOV_64:
    case OPC1_32_RLC_MOV_U:
    case OPC1_32_RLC_MOV_H:
    case OPC1_32_RLC_MOVH_A:
    case OPC1_32_RLC_MTCR:
        decode_rlc_opc(ctx, op1);
        break;
    case OPCM_32_RR_ACCUMULATOR:
        decode_rr_accumulator(ctx);
        break;
    case OPCM_32_RR_LOGICAL_SHIFT:
        decode_rr_logical_shift(ctx);
        break;
    case OPCM_32_RR_ADDRESS:
        decode_rr_address(ctx);
        break;
    case OPCM_32_RR_IDIRECT:
        decode_rr_idirect(ctx);
        break;
    case OPCM_32_RR_DIVIDE:
        decode_rr_divide(ctx);
        break;
    case OPCM_32_RR1_MUL:
        decode_rr1_mul(ctx);
        break;
    case OPCM_32_RR1_MULQ:
        decode_rr1_mulq(ctx);
        break;
    case OPCM_32_RR2_MUL:
        decode_rr2_mul(ctx);
        break;
    case OPCM_32_RRPW_EXTRACT_INSERT:
        decode_rrpw_extract_insert(ctx);
        break;
    case OPC1_32_RRPW_DEXTR:
        r1 = MASK_OP_RRPW_S1(ctx->opcode);
        r2 = MASK_OP_RRPW_S2(ctx->opcode);
        r3 = MASK_OP_RRPW_D(ctx->opcode);
        const16 = MASK_OP_RRPW_POS(ctx->opcode);

        tcg_gen_extract2_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], cpu_gpr_d[r1],
                            32 - const16);
        break;
    case OPCM_32_RRR_COND_SELECT:
        decode_rrr_cond_select(ctx);
        break;
    case OPCM_32_RRR_DIVIDE:
        decode_rrr_divide(ctx);
        break;
    case OPCM_32_RRR2_MADD:
        decode_rrr2_madd(ctx);
        break;
    case OPCM_32_RRR2_MSUB:
        decode_rrr2_msub(ctx);
        break;
    case OPCM_32_RRR1_MADD:
        decode_rrr1_madd(ctx);
        break;
    case OPCM_32_RRR1_MADDQ_H:
        decode_rrr1_maddq_h(ctx);
        break;
    case OPCM_32_RRR1_MADDSU_H:
        decode_rrr1_maddsu_h(ctx);
        break;
    case OPCM_32_RRR1_MSUB_H:
        decode_rrr1_msub(ctx);
        break;
    case OPCM_32_RRR1_MSUB_Q:
        decode_rrr1_msubq_h(ctx);
        break;
    case OPCM_32_RRR1_MSUBAD_H:
        decode_rrr1_msubad_h(ctx);
        break;
    case OPCM_32_RRRR_EXTRACT_INSERT:
        decode_rrrr_extract_insert(ctx);
        break;
    case OPCM_32_RRRW_EXTRACT_INSERT:
        decode_rrrw_extract_insert(ctx);
        break;
    case OPCM_32_SYS_INTERRUPTS:
        decode_sys_interrupts(ctx);
        break;
    case OPC1_32_SYS_RSTV:
        tcg_gen_movi_tl(cpu_PSW_V, 0);
        tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
        tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}
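/*
 * Bit 0 of the first halfword distinguishes the two encoding lengths:
 * it is clear for 16-bit instructions and set for 32-bit instructions.
 */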
static bool tricore_insn_is_16bit(uint32_t insn)
{
    return (insn & 0x1) == 0;
}
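/*
 * Per-TB setup: cache the MMU index, the privilege level from the TB
 * flags and the CPU feature bits, and pick the ICR.IE mask/offset,
 * which differ between the 1.3 and 1.6.1 register layouts.
 */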
static void tricore_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUTriCoreState *env = cpu_env(cs);
    ctx->mem_idx = cpu_mmu_index(env, false);

    uint32_t tb_flags = (uint32_t)ctx->base.tb->flags;
    ctx->priv = FIELD_EX32(tb_flags, TB_FLAGS, PRIV);

    ctx->features = env->features;
    if (has_feature(ctx, TRICORE_FEATURE_161)) {
        ctx->icr_ie_mask = R_ICR_IE_161_MASK;
        ctx->icr_ie_offset = R_ICR_IE_161_SHIFT;
    } else {
        ctx->icr_ie_mask = R_ICR_IE_13_MASK;
        ctx->icr_ie_offset = R_ICR_IE_13_SHIFT;
    }
}
static void tricore_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
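/* Record the guest PC of the instruction that is about to be translated. */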
static void tricore_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}
static bool insn_crosses_page(CPUTriCoreState *env, DisasContext *ctx)
{
    /*
     * Return true if the insn at ctx->base.pc_next might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * Our caller ensures we are only called if dc->base.pc_next is less than
     * 4 bytes from the page boundary, so we cross the page if the first
     * 16 bits indicate that this is a 32 bit insn.
     */
    uint16_t insn = translator_lduw(env, &ctx->base, ctx->base.pc_next);

    return !tricore_insn_is_16bit(insn);
}
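/*
 * Translate one instruction: read the low halfword, pick the 16-bit or
 * 32-bit decoder, advance pc_next, and end the TB early if the next
 * instruction might start on (or cross into) another page.
 */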
static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUTriCoreState *env = cpu_env(cpu);
    uint16_t insn_lo;
    bool is_16bit;

    insn_lo = translator_lduw(env, &ctx->base, ctx->base.pc_next);
    is_16bit = tricore_insn_is_16bit(insn_lo);
    if (is_16bit) {
        ctx->opcode = insn_lo;
        ctx->pc_succ_insn = ctx->base.pc_next + 2;
        decode_16Bit_opc(ctx);
    } else {
        uint32_t insn_hi = translator_lduw(env, &ctx->base,
                                           ctx->base.pc_next + 2);
        ctx->opcode = insn_hi << 16 | insn_lo;
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        decode_32Bit_opc(ctx);
    }
    ctx->base.pc_next = ctx->pc_succ_insn;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE
            || (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, ctx))) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
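/*
 * Finish the TB according to base.is_jmp: chain to the next TB for
 * DISAS_TOO_MANY, exit to the main loop for DISAS_EXIT(_UPDATE), or do
 * a TB lookup for DISAS_JUMP.
 */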
static void tricore_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_EXIT_UPDATE:
        gen_save_pc(ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_JUMP:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}
static void tricore_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
static const TranslatorOps tricore_tr_ops = {
    .init_disas_context = tricore_tr_init_disas_context,
    .tb_start           = tricore_tr_tb_start,
    .insn_start         = tricore_tr_insn_start,
    .translate_insn     = tricore_tr_translate_insn,
    .tb_stop            = tricore_tr_tb_stop,
    .disas_log          = tricore_tr_disas_log,
};
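/* Entry point used by the common translator loop to build one TB. */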
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc,
                    &tricore_tr_ops, &ctx.base);
}
void cpu_state_reset(CPUTriCoreState *env)
{
    /* Reset Regs to Default Value */
    env->PSW = 0xb80;
    fpu_set_state(env);
}
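/*
 * Map the core special function registers (PCXI, PSW, PC, ICR) onto TCG
 * globals backed by their CPUTriCoreState fields.
 */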
static void tricore_tcg_init_csfr(void)
{
    cpu_PCXI = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUTriCoreState, PCXI), "PCXI");
    cpu_PSW = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUTriCoreState, PSW), "PSW");
    cpu_PC = tcg_global_mem_new(tcg_env,
                                offsetof(CPUTriCoreState, PC), "PC");
    cpu_ICR = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUTriCoreState, ICR), "ICR");
}
void tricore_tcg_init(void)
{
    int i;

    for (i = 0 ; i < 16 ; i++) {
        cpu_gpr_a[i] = tcg_global_mem_new(tcg_env,
                                          offsetof(CPUTriCoreState, gpr_a[i]),
                                          regnames_a[i]);
    }
    for (i = 0 ; i < 16 ; i++) {
        cpu_gpr_d[i] = tcg_global_mem_new(tcg_env,
                                          offsetof(CPUTriCoreState, gpr_d[i]),
                                          regnames_d[i]);
    }
    tricore_tcg_init_csfr();
    /* init PSW flag cache */
    cpu_PSW_C = tcg_global_mem_new(tcg_env,
                                   offsetof(CPUTriCoreState, PSW_USB_C),
                                   "PSW_C");
    cpu_PSW_V = tcg_global_mem_new(tcg_env,
                                   offsetof(CPUTriCoreState, PSW_USB_V),
                                   "PSW_V");
    cpu_PSW_SV = tcg_global_mem_new(tcg_env,
                                    offsetof(CPUTriCoreState, PSW_USB_SV),
                                    "PSW_SV");
    cpu_PSW_AV = tcg_global_mem_new(tcg_env,
                                    offsetof(CPUTriCoreState, PSW_USB_AV),
                                    "PSW_AV");
    cpu_PSW_SAV = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUTriCoreState, PSW_USB_SAV),