target-tricore/translate.c
1 /*
2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "tcg-op.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
32 * TCG registers
34 static TCGv cpu_PC;
35 static TCGv cpu_PCXI;
36 static TCGv cpu_PSW;
37 static TCGv cpu_ICR;
38 /* GPR registers */
39 static TCGv cpu_gpr_a[16];
40 static TCGv cpu_gpr_d[16];
41 /* PSW Flag cache */
42 static TCGv cpu_PSW_C;
43 static TCGv cpu_PSW_V;
44 static TCGv cpu_PSW_SV;
45 static TCGv cpu_PSW_AV;
46 static TCGv cpu_PSW_SAV;
47 /* CPU env */
48 static TCGv_ptr cpu_env;
50 #include "exec/gen-icount.h"
52 static const char *regnames_a[] = {
53 "a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
54 "a6" , "a7" , "a8" , "a9" , "sp" , "a11" ,
55 "a12" , "a13" , "a14" , "a15",
58 static const char *regnames_d[] = {
59 "d0" , "d1" , "d2" , "d3" , "d4" , "d5" ,
60 "d6" , "d7" , "d8" , "d9" , "d10" , "d11" ,
61 "d12" , "d13" , "d14" , "d15",
64 typedef struct DisasContext {
65 struct TranslationBlock *tb;
66 target_ulong pc, saved_pc, next_pc;
67 uint32_t opcode;
68 int singlestep_enabled;
69 /* Routine used to access memory */
70 int mem_idx;
71 uint32_t hflags, saved_hflags;
72 int bstate;
73 } DisasContext;
75 enum {
77 BS_NONE = 0,
78 BS_STOP = 1,
79 BS_BRANCH = 2,
80 BS_EXCP = 3,
83 void tricore_cpu_dump_state(CPUState *cs, FILE *f,
84 fprintf_function cpu_fprintf, int flags)
86 TriCoreCPU *cpu = TRICORE_CPU(cs);
87 CPUTriCoreState *env = &cpu->env;
88 uint32_t psw;
89 int i;
91 psw = psw_read(env);
93 cpu_fprintf(f, "PC: " TARGET_FMT_lx, env->PC);
94 cpu_fprintf(f, " PSW: " TARGET_FMT_lx, psw);
95 cpu_fprintf(f, " ICR: " TARGET_FMT_lx, env->ICR);
96 cpu_fprintf(f, "\nPCXI: " TARGET_FMT_lx, env->PCXI);
97 cpu_fprintf(f, " FCX: " TARGET_FMT_lx, env->FCX);
98 cpu_fprintf(f, " LCX: " TARGET_FMT_lx, env->LCX);
100 for (i = 0; i < 16; ++i) {
101 if ((i & 3) == 0) {
102 cpu_fprintf(f, "\nGPR A%02d:", i);
104 cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_a[i]);
106 for (i = 0; i < 16; ++i) {
107 if ((i & 3) == 0) {
108 cpu_fprintf(f, "\nGPR D%02d:", i);
110 cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_d[i]);
112 cpu_fprintf(f, "\n");
116 * Functions to generate micro-ops
119 /* Macros for generating helpers */
121 #define gen_helper_1arg(name, arg) do { \
122 TCGv_i32 helper_tmp = tcg_const_i32(arg); \
123 gen_helper_##name(cpu_env, helper_tmp); \
124 tcg_temp_free_i32(helper_tmp); \
125 } while (0)
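/* The GEN_HELPER_{LL, LU, UL, UU} macros below split their two 32-bit
   operands into sign-extended 16-bit halves and pass the selected half
   combinations, together with the shift amount n, to the corresponding
   packed-halfword helper. */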
127 #define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \
128 TCGv arg00 = tcg_temp_new(); \
129 TCGv arg01 = tcg_temp_new(); \
130 TCGv arg11 = tcg_temp_new(); \
131 tcg_gen_sari_tl(arg00, arg0, 16); \
132 tcg_gen_ext16s_tl(arg01, arg0); \
133 tcg_gen_ext16s_tl(arg11, arg1); \
134 gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
135 tcg_temp_free(arg00); \
136 tcg_temp_free(arg01); \
137 tcg_temp_free(arg11); \
138 } while (0)
140 #define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \
141 TCGv arg00 = tcg_temp_new(); \
142 TCGv arg01 = tcg_temp_new(); \
143 TCGv arg10 = tcg_temp_new(); \
144 TCGv arg11 = tcg_temp_new(); \
145 tcg_gen_sari_tl(arg00, arg0, 16); \
146 tcg_gen_ext16s_tl(arg01, arg0); \
147 tcg_gen_sari_tl(arg11, arg1, 16); \
148 tcg_gen_ext16s_tl(arg10, arg1); \
149 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
150 tcg_temp_free(arg00); \
151 tcg_temp_free(arg01); \
152 tcg_temp_free(arg10); \
153 tcg_temp_free(arg11); \
154 } while (0)
156 #define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \
157 TCGv arg00 = tcg_temp_new(); \
158 TCGv arg01 = tcg_temp_new(); \
159 TCGv arg10 = tcg_temp_new(); \
160 TCGv arg11 = tcg_temp_new(); \
161 tcg_gen_sari_tl(arg00, arg0, 16); \
162 tcg_gen_ext16s_tl(arg01, arg0); \
163 tcg_gen_sari_tl(arg10, arg1, 16); \
164 tcg_gen_ext16s_tl(arg11, arg1); \
165 gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
166 tcg_temp_free(arg00); \
167 tcg_temp_free(arg01); \
168 tcg_temp_free(arg10); \
169 tcg_temp_free(arg11); \
170 } while (0)
172 #define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \
173 TCGv arg00 = tcg_temp_new(); \
174 TCGv arg01 = tcg_temp_new(); \
175 TCGv arg11 = tcg_temp_new(); \
176 tcg_gen_sari_tl(arg01, arg0, 16); \
177 tcg_gen_ext16s_tl(arg00, arg0); \
178 tcg_gen_sari_tl(arg11, arg1, 16); \
179 gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
180 tcg_temp_free(arg00); \
181 tcg_temp_free(arg01); \
182 tcg_temp_free(arg11); \
183 } while (0)
185 #define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do { \
186 TCGv_i64 ret = tcg_temp_new_i64(); \
187 TCGv_i64 arg1 = tcg_temp_new_i64(); \
189 tcg_gen_concat_i32_i64(arg1, al1, ah1); \
190 gen_helper_##name(ret, arg1, arg2); \
191 tcg_gen_extr_i64_i32(rl, rh, ret); \
193 tcg_temp_free_i64(ret); \
194 tcg_temp_free_i64(arg1); \
195 } while (0)
197 #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
198 #define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
199 ((offset & 0x0fffff) << 1))
201 /* Functions for load/save to/from memory */
203 static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
204 int16_t con, TCGMemOp mop)
206 TCGv temp = tcg_temp_new();
207 tcg_gen_addi_tl(temp, r2, con);
208 tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
209 tcg_temp_free(temp);
212 static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
213 int16_t con, TCGMemOp mop)
215 TCGv temp = tcg_temp_new();
216 tcg_gen_addi_tl(temp, r2, con);
217 tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
218 tcg_temp_free(temp);
221 static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
223 TCGv_i64 temp = tcg_temp_new_i64();
225 tcg_gen_concat_i32_i64(temp, rl, rh);
226 tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);
228 tcg_temp_free_i64(temp);
231 static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
232 DisasContext *ctx)
234 TCGv temp = tcg_temp_new();
235 tcg_gen_addi_tl(temp, base, con);
236 gen_st_2regs_64(rh, rl, temp, ctx);
237 tcg_temp_free(temp);
240 static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
242 TCGv_i64 temp = tcg_temp_new_i64();
244 tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
245 /* write back to two 32 bit regs */
246 tcg_gen_extr_i64_i32(rl, rh, temp);
248 tcg_temp_free_i64(temp);
251 static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
252 DisasContext *ctx)
254 TCGv temp = tcg_temp_new();
255 tcg_gen_addi_tl(temp, base, con);
256 gen_ld_2regs_64(rh, rl, temp, ctx);
257 tcg_temp_free(temp);
260 static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
261 TCGMemOp mop)
263 TCGv temp = tcg_temp_new();
264 tcg_gen_addi_tl(temp, r2, off);
265 tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
266 tcg_gen_mov_tl(r2, temp);
267 tcg_temp_free(temp);
270 static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
271 TCGMemOp mop)
273 TCGv temp = tcg_temp_new();
274 tcg_gen_addi_tl(temp, r2, off);
275 tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
276 tcg_gen_mov_tl(r2, temp);
277 tcg_temp_free(temp);
280 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
281 static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
283 TCGv temp = tcg_temp_new();
284 TCGv temp2 = tcg_temp_new();
286 /* temp = M(EA, word) */
287 tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
288 /* temp = temp & ~E[a][63:32] */
289 tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
290 /* temp2 = (E[a][31:0] & E[a][63:32]); */
291 tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
292 /* temp = temp | temp2; */
293 tcg_gen_or_tl(temp, temp, temp2);
294 /* M(EA, word) = temp; */
295 tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);
297 tcg_temp_free(temp);
298 tcg_temp_free(temp2);
301 /* tmp = M(EA, word);
302 M(EA, word) = D[a];
303 D[a] = tmp[31:0];*/
304 static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
306 TCGv temp = tcg_temp_new();
308 tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
309 tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
310 tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
312 tcg_temp_free(temp);
315 /* We generate loads and stores to the core special function registers (csfr)
316 through the functions gen_mfcr and gen_mtcr. To handle access permissions, we
317 use the 3 macros R, A and E, which allow read-only, all, and endinit-protected
318 access. These macros also specify in which ISA version the csfr was introduced. */
319 #define R(ADDRESS, REG, FEATURE) \
320 case ADDRESS: \
321 if (tricore_feature(env, FEATURE)) { \
322 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
324 break;
325 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
326 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
327 static inline void gen_mfcr(CPUTriCoreState *env, TCGv ret, int32_t offset)
329 /* since we're caching PSW make this a special case */
330 if (offset == 0xfe04) {
331 gen_helper_psw_read(ret, cpu_env);
332 } else {
333 switch (offset) {
334 #include "csfr.def"
338 #undef R
339 #undef A
340 #undef E
342 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
343 since no exception occurs */
344 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
345 case ADDRESS: \
346 if (tricore_feature(env, FEATURE)) { \
347 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
349 break;
350 /* Endinit-protected registers
351 TODO: Since the endinit bit lives in a register of the watchdog device,
352 which is not yet implemented, we treat endinit-protected registers
353 like all-access registers for now. */
354 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
355 static inline void gen_mtcr(CPUTriCoreState *env, DisasContext *ctx, TCGv r1,
356 int32_t offset)
358 if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
359 /* since we're caching PSW make this a special case */
360 if (offset == 0xfe04) {
361 gen_helper_psw_write(cpu_env, r1);
362 } else {
363 switch (offset) {
364 #include "csfr.def"
367 } else {
368 /* generate privilege trap */
372 /* Functions for arithmetic instructions */
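/* Common pattern used by the arithmetic helpers below: PSW_V holds the
   signed overflow in bit 31, PSW_SV accumulates it (sticky overflow),
   PSW_AV is computed as result[31] ^ result[30] (advance overflow) and
   PSW_SAV accumulates AV (sticky advance overflow). */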
374 static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
376 TCGv t0 = tcg_temp_new_i32();
377 TCGv result = tcg_temp_new_i32();
378 /* Addition and set V/SV bits */
379 tcg_gen_add_tl(result, r1, r2);
380 /* calc V bit */
381 tcg_gen_xor_tl(cpu_PSW_V, result, r1);
382 tcg_gen_xor_tl(t0, r1, r2);
383 tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
384 /* Calc SV bit */
385 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
386 /* Calc AV/SAV bits */
387 tcg_gen_add_tl(cpu_PSW_AV, result, result);
388 tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
389 /* calc SAV */
390 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
391 /* write back result */
392 tcg_gen_mov_tl(ret, result);
394 tcg_temp_free(result);
395 tcg_temp_free(t0);
398 /* ret = r2 + (r1 * r3); */
399 static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
401 TCGv_i64 t1 = tcg_temp_new_i64();
402 TCGv_i64 t2 = tcg_temp_new_i64();
403 TCGv_i64 t3 = tcg_temp_new_i64();
405 tcg_gen_ext_i32_i64(t1, r1);
406 tcg_gen_ext_i32_i64(t2, r2);
407 tcg_gen_ext_i32_i64(t3, r3);
409 tcg_gen_mul_i64(t1, t1, t3);
410 tcg_gen_add_i64(t1, t2, t1);
412 tcg_gen_trunc_i64_i32(ret, t1);
413 /* calc V
414 t1 > 0x7fffffff */
415 tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
416 /* t1 < -0x80000000 */
417 tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
418 tcg_gen_or_i64(t2, t2, t3);
419 tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
420 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
421 /* Calc SV bit */
422 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
423 /* Calc AV/SAV bits */
424 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
425 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
426 /* calc SAV */
427 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
429 tcg_temp_free_i64(t1);
430 tcg_temp_free_i64(t2);
431 tcg_temp_free_i64(t3);
434 static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
436 TCGv temp = tcg_const_i32(con);
437 gen_madd32_d(ret, r1, r2, temp);
438 tcg_temp_free(temp);
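/* (ret_high:ret_low) = (r2_high:r2_low) + (r1 * r3); 64-bit signed
   multiply-accumulate, where only the final addition can overflow. */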
441 static inline void
442 gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
443 TCGv r3)
445 TCGv t1 = tcg_temp_new();
446 TCGv t2 = tcg_temp_new();
447 TCGv t3 = tcg_temp_new();
448 TCGv t4 = tcg_temp_new();
450 tcg_gen_muls2_tl(t1, t2, r1, r3);
451 /* only the add can overflow */
452 tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
453 /* calc V bit */
454 tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
455 tcg_gen_xor_tl(t1, r2_high, t2);
456 tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
457 /* Calc SV bit */
458 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
459 /* Calc AV/SAV bits */
460 tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
461 tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
462 /* calc SAV */
463 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
464 /* write back the result */
465 tcg_gen_mov_tl(ret_low, t3);
466 tcg_gen_mov_tl(ret_high, t4);
468 tcg_temp_free(t1);
469 tcg_temp_free(t2);
470 tcg_temp_free(t3);
471 tcg_temp_free(t4);
474 static inline void
475 gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
476 TCGv r3)
478 TCGv_i64 t1 = tcg_temp_new_i64();
479 TCGv_i64 t2 = tcg_temp_new_i64();
480 TCGv_i64 t3 = tcg_temp_new_i64();
482 tcg_gen_extu_i32_i64(t1, r1);
483 tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
484 tcg_gen_extu_i32_i64(t3, r3);
486 tcg_gen_mul_i64(t1, t1, t3);
487 tcg_gen_add_i64(t2, t2, t1);
488 /* write back result */
489 tcg_gen_extr_i64_i32(ret_low, ret_high, t2);
490 /* only the add can overflow; it does so iff t2 < t1.
491 calc V bit */
492 tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
493 tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
494 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
495 /* Calc SV bit */
496 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
497 /* Calc AV/SAV bits */
498 tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
499 tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
500 /* calc SAV */
501 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
503 tcg_temp_free_i64(t1);
504 tcg_temp_free_i64(t2);
505 tcg_temp_free_i64(t3);
508 static inline void
509 gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
510 int32_t con)
512 TCGv temp = tcg_const_i32(con);
513 gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
514 tcg_temp_free(temp);
517 static inline void
518 gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
519 int32_t con)
521 TCGv temp = tcg_const_i32(con);
522 gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
523 tcg_temp_free(temp);
526 /* ret = r2 - (r1 * r3); */
527 static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
529 TCGv_i64 t1 = tcg_temp_new_i64();
530 TCGv_i64 t2 = tcg_temp_new_i64();
531 TCGv_i64 t3 = tcg_temp_new_i64();
533 tcg_gen_ext_i32_i64(t1, r1);
534 tcg_gen_ext_i32_i64(t2, r2);
535 tcg_gen_ext_i32_i64(t3, r3);
537 tcg_gen_mul_i64(t1, t1, t3);
538 tcg_gen_sub_i64(t1, t2, t1);
540 tcg_gen_trunc_i64_i32(ret, t1);
541 /* calc V
542 t1 > 0x7fffffff */
543 tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
544 /* result < -0x80000000 */
545 tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
546 tcg_gen_or_i64(t2, t2, t3);
547 tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
548 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
550 /* Calc SV bit */
551 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
552 /* Calc AV/SAV bits */
553 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
554 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
555 /* calc SAV */
556 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
558 tcg_temp_free_i64(t1);
559 tcg_temp_free_i64(t2);
560 tcg_temp_free_i64(t3);
563 static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
565 TCGv temp = tcg_const_i32(con);
566 gen_msub32_d(ret, r1, r2, temp);
567 tcg_temp_free(temp);
570 static inline void
571 gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
572 TCGv r3)
574 TCGv t1 = tcg_temp_new();
575 TCGv t2 = tcg_temp_new();
576 TCGv t3 = tcg_temp_new();
577 TCGv t4 = tcg_temp_new();
579 tcg_gen_muls2_tl(t1, t2, r1, r3);
580 /* only the sub can overflow */
581 tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
582 /* calc V bit */
583 tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
584 tcg_gen_xor_tl(t1, r2_high, t2);
585 tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
586 /* Calc SV bit */
587 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
588 /* Calc AV/SAV bits */
589 tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
590 tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
591 /* calc SAV */
592 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
593 /* write back the result */
594 tcg_gen_mov_tl(ret_low, t3);
595 tcg_gen_mov_tl(ret_high, t4);
597 tcg_temp_free(t1);
598 tcg_temp_free(t2);
599 tcg_temp_free(t3);
600 tcg_temp_free(t4);
603 static inline void
604 gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
605 int32_t con)
607 TCGv temp = tcg_const_i32(con);
608 gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
609 tcg_temp_free(temp);
612 static inline void
613 gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
614 TCGv r3)
616 TCGv_i64 t1 = tcg_temp_new_i64();
617 TCGv_i64 t2 = tcg_temp_new_i64();
618 TCGv_i64 t3 = tcg_temp_new_i64();
620 tcg_gen_extu_i32_i64(t1, r1);
621 tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
622 tcg_gen_extu_i32_i64(t3, r3);
624 tcg_gen_mul_i64(t1, t1, t3);
625 tcg_gen_sub_i64(t3, t2, t1);
626 tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
627 /* calc V bit, only the sub can overflow, if t1 > t2 */
628 tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
629 tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
630 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
631 /* Calc SV bit */
632 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
633 /* Calc AV/SAV bits */
634 tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
635 tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
636 /* calc SAV */
637 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
639 tcg_temp_free_i64(t1);
640 tcg_temp_free_i64(t2);
641 tcg_temp_free_i64(t3);
644 static inline void
645 gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
646 int32_t con)
648 TCGv temp = tcg_const_i32(con);
649 gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
650 tcg_temp_free(temp);
653 static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
655 TCGv temp = tcg_const_i32(r2);
656 gen_add_d(ret, r1, temp);
657 tcg_temp_free(temp);
659 /* calculate the carry bit too */
660 static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
662 TCGv t0 = tcg_temp_new_i32();
663 TCGv result = tcg_temp_new_i32();
665 tcg_gen_movi_tl(t0, 0);
666 /* Addition and set C/V/SV bits */
667 tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
668 /* calc V bit */
669 tcg_gen_xor_tl(cpu_PSW_V, result, r1);
670 tcg_gen_xor_tl(t0, r1, r2);
671 tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
672 /* Calc SV bit */
673 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
674 /* Calc AV/SAV bits */
675 tcg_gen_add_tl(cpu_PSW_AV, result, result);
676 tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
677 /* calc SAV */
678 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
679 /* write back result */
680 tcg_gen_mov_tl(ret, result);
682 tcg_temp_free(result);
683 tcg_temp_free(t0);
686 static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
688 TCGv temp = tcg_const_i32(con);
689 gen_add_CC(ret, r1, temp);
690 tcg_temp_free(temp);
693 static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
695 TCGv carry = tcg_temp_new_i32();
696 TCGv t0 = tcg_temp_new_i32();
697 TCGv result = tcg_temp_new_i32();
699 tcg_gen_movi_tl(t0, 0);
700 tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
701 /* Addition, carry and set C/V/SV bits */
702 tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
703 tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
704 /* calc V bit */
705 tcg_gen_xor_tl(cpu_PSW_V, result, r1);
706 tcg_gen_xor_tl(t0, r1, r2);
707 tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
708 /* Calc SV bit */
709 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
710 /* Calc AV/SAV bits */
711 tcg_gen_add_tl(cpu_PSW_AV, result, result);
712 tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
713 /* calc SAV */
714 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
715 /* write back result */
716 tcg_gen_mov_tl(ret, result);
718 tcg_temp_free(result);
719 tcg_temp_free(t0);
720 tcg_temp_free(carry);
723 static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
725 TCGv temp = tcg_const_i32(con);
726 gen_addc_CC(ret, r1, temp);
727 tcg_temp_free(temp);
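/* Conditional add: if (r4 cond 0) then r3 = r1 + r2 and the PSW status
   bits are updated, otherwise r3 = r1 and the PSW is left untouched. */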
730 static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
731 TCGv r4)
733 TCGv temp = tcg_temp_new();
734 TCGv temp2 = tcg_temp_new();
735 TCGv result = tcg_temp_new();
736 TCGv mask = tcg_temp_new();
737 TCGv t0 = tcg_const_i32(0);
739 /* create mask for sticky bits */
740 tcg_gen_setcond_tl(cond, mask, r4, t0);
741 tcg_gen_shli_tl(mask, mask, 31);
743 tcg_gen_add_tl(result, r1, r2);
744 /* Calc PSW_V */
745 tcg_gen_xor_tl(temp, result, r1);
746 tcg_gen_xor_tl(temp2, r1, r2);
747 tcg_gen_andc_tl(temp, temp, temp2);
748 tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
749 /* Set PSW_SV */
750 tcg_gen_and_tl(temp, temp, mask);
751 tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
752 /* calc AV bit */
753 tcg_gen_add_tl(temp, result, result);
754 tcg_gen_xor_tl(temp, temp, result);
755 tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
756 /* calc SAV bit */
757 tcg_gen_and_tl(temp, temp, mask);
758 tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
759 /* write back result */
760 tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
762 tcg_temp_free(t0);
763 tcg_temp_free(temp);
764 tcg_temp_free(temp2);
765 tcg_temp_free(result);
766 tcg_temp_free(mask);
769 static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
770 TCGv r3, TCGv r4)
772 TCGv temp = tcg_const_i32(r2);
773 gen_cond_add(cond, r1, temp, r3, r4);
774 tcg_temp_free(temp);
777 static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
779 TCGv temp = tcg_temp_new_i32();
780 TCGv result = tcg_temp_new_i32();
782 tcg_gen_sub_tl(result, r1, r2);
783 /* calc V bit */
784 tcg_gen_xor_tl(cpu_PSW_V, result, r1);
785 tcg_gen_xor_tl(temp, r1, r2);
786 tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
787 /* calc SV bit */
788 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
789 /* Calc AV bit */
790 tcg_gen_add_tl(cpu_PSW_AV, result, result);
791 tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
792 /* calc SAV bit */
793 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
794 /* write back result */
795 tcg_gen_mov_tl(ret, result);
797 tcg_temp_free(temp);
798 tcg_temp_free(result);
801 static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
803 TCGv result = tcg_temp_new();
804 TCGv temp = tcg_temp_new();
806 tcg_gen_sub_tl(result, r1, r2);
807 /* calc C bit */
808 tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2);
809 /* calc V bit */
810 tcg_gen_xor_tl(cpu_PSW_V, result, r1);
811 tcg_gen_xor_tl(temp, r1, r2);
812 tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
813 /* calc SV bit */
814 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
815 /* Calc AV bit */
816 tcg_gen_add_tl(cpu_PSW_AV, result, result);
817 tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
818 /* calc SAV bit */
819 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
820 /* write back result */
821 tcg_gen_mov_tl(ret, result);
823 tcg_temp_free(result);
824 tcg_temp_free(temp);
827 static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
829 TCGv temp = tcg_temp_new();
830 tcg_gen_not_tl(temp, r2);
831 gen_addc_CC(ret, r1, temp);
832 tcg_temp_free(temp);
835 static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
836 TCGv r4)
838 TCGv temp = tcg_temp_new();
839 TCGv temp2 = tcg_temp_new();
840 TCGv result = tcg_temp_new();
841 TCGv mask = tcg_temp_new();
842 TCGv t0 = tcg_const_i32(0);
844 /* create mask for sticky bits */
845 tcg_gen_setcond_tl(cond, mask, r4, t0);
846 tcg_gen_shli_tl(mask, mask, 31);
848 tcg_gen_sub_tl(result, r1, r2);
849 /* Calc PSW_V */
850 tcg_gen_xor_tl(temp, result, r1);
851 tcg_gen_xor_tl(temp2, r1, r2);
852 tcg_gen_and_tl(temp, temp, temp2);
853 tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
854 /* Set PSW_SV */
855 tcg_gen_and_tl(temp, temp, mask);
856 tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
857 /* calc AV bit */
858 tcg_gen_add_tl(temp, result, result);
859 tcg_gen_xor_tl(temp, temp, result);
860 tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
861 /* calc SAV bit */
862 tcg_gen_and_tl(temp, temp, mask);
863 tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
864 /* write back result */
865 tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
867 tcg_temp_free(t0);
868 tcg_temp_free(temp);
869 tcg_temp_free(temp2);
870 tcg_temp_free(result);
871 tcg_temp_free(mask);
874 static inline void gen_abs(TCGv ret, TCGv r1)
876 TCGv temp = tcg_temp_new();
877 TCGv t0 = tcg_const_i32(0);
879 tcg_gen_neg_tl(temp, r1);
880 tcg_gen_movcond_tl(TCG_COND_GE, ret, r1, t0, r1, temp);
881 /* overflow can only happen if r1 = 0x80000000 */
882 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
883 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
884 /* calc SV bit */
885 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
886 /* Calc AV bit */
887 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
888 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
889 /* calc SAV bit */
890 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
892 tcg_temp_free(temp);
893 tcg_temp_free(t0);
896 static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
898 TCGv temp = tcg_temp_new_i32();
899 TCGv result = tcg_temp_new_i32();
901 tcg_gen_sub_tl(result, r1, r2);
902 tcg_gen_sub_tl(temp, r2, r1);
903 tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);
905 /* calc V bit */
906 tcg_gen_xor_tl(cpu_PSW_V, result, r1);
907 tcg_gen_xor_tl(temp, result, r2);
908 tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
909 tcg_gen_xor_tl(temp, r1, r2);
910 tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
911 /* calc SV bit */
912 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
913 /* Calc AV bit */
914 tcg_gen_add_tl(cpu_PSW_AV, result, result);
915 tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
916 /* calc SAV bit */
917 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
918 /* write back result */
919 tcg_gen_mov_tl(ret, result);
921 tcg_temp_free(temp);
922 tcg_temp_free(result);
925 static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
927 TCGv temp = tcg_const_i32(con);
928 gen_absdif(ret, r1, temp);
929 tcg_temp_free(temp);
932 static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
934 TCGv temp = tcg_const_i32(con);
935 gen_helper_absdif_ssov(ret, cpu_env, r1, temp);
936 tcg_temp_free(temp);
939 static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
941 TCGv high = tcg_temp_new();
942 TCGv low = tcg_temp_new();
944 tcg_gen_muls2_tl(low, high, r1, r2);
945 tcg_gen_mov_tl(ret, low);
946 /* calc V bit */
947 tcg_gen_sari_tl(low, low, 31);
948 tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
949 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
950 /* calc SV bit */
951 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
952 /* Calc AV bit */
953 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
954 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
955 /* calc SAV bit */
956 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
958 tcg_temp_free(high);
959 tcg_temp_free(low);
962 static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
964 TCGv temp = tcg_const_i32(con);
965 gen_mul_i32s(ret, r1, temp);
966 tcg_temp_free(temp);
969 static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
971 tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
972 /* clear V bit */
973 tcg_gen_movi_tl(cpu_PSW_V, 0);
974 /* calc SV bit */
975 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
976 /* Calc AV bit */
977 tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
978 tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
979 /* calc SAV bit */
980 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
983 static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
984 int32_t con)
986 TCGv temp = tcg_const_i32(con);
987 gen_mul_i64s(ret_low, ret_high, r1, temp);
988 tcg_temp_free(temp);
991 static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
993 tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
994 /* clear V bit */
995 tcg_gen_movi_tl(cpu_PSW_V, 0);
996 /* calc SV bit */
997 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
998 /* Calc AV bit */
999 tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
1000 tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
1001 /* calc SAV bit */
1002 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
1005 static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
1006 int32_t con)
1008 TCGv temp = tcg_const_i32(con);
1009 gen_mul_i64u(ret_low, ret_high, r1, temp);
1010 tcg_temp_free(temp);
1013 static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
1015 TCGv temp = tcg_const_i32(con);
1016 gen_helper_mul_ssov(ret, cpu_env, r1, temp);
1017 tcg_temp_free(temp);
1020 static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
1022 TCGv temp = tcg_const_i32(con);
1023 gen_helper_mul_suov(ret, cpu_env, r1, temp);
1024 tcg_temp_free(temp);
1026 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
1027 static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
1029 TCGv temp = tcg_const_i32(con);
1030 gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp);
1031 tcg_temp_free(temp);
1034 static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
1036 TCGv temp = tcg_const_i32(con);
1037 gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp);
1038 tcg_temp_free(temp);
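/* Q-format (fractional) multiply: n == 1 selects the additional left
   shift by one, up_shift scales the 64-bit product down before it is
   written back. Overflow can only occur in the 0x8000 * 0x8000 case. */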
1041 static void
1042 gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
1044 TCGv temp = tcg_temp_new();
1045 TCGv_i64 temp_64 = tcg_temp_new_i64();
1046 TCGv_i64 temp2_64 = tcg_temp_new_i64();
1048 if (n == 0) {
1049 if (up_shift == 32) {
1050 tcg_gen_muls2_tl(rh, rl, arg1, arg2);
1051 } else if (up_shift == 16) {
1052 tcg_gen_ext_i32_i64(temp_64, arg1);
1053 tcg_gen_ext_i32_i64(temp2_64, arg2);
1055 tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
1056 tcg_gen_shri_i64(temp_64, temp_64, up_shift);
1057 tcg_gen_extr_i64_i32(rl, rh, temp_64);
1058 } else {
1059 tcg_gen_muls2_tl(rl, rh, arg1, arg2);
1061 /* reset v bit */
1062 tcg_gen_movi_tl(cpu_PSW_V, 0);
1063 } else { /* n is expected to be 1 */
1064 tcg_gen_ext_i32_i64(temp_64, arg1);
1065 tcg_gen_ext_i32_i64(temp2_64, arg2);
1067 tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
1069 if (up_shift == 0) {
1070 tcg_gen_shli_i64(temp_64, temp_64, 1);
1071 } else {
1072 tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1);
1074 tcg_gen_extr_i64_i32(rl, rh, temp_64);
1075 /* overflow only occurs if r1 = r2 = 0x8000 */
1076 if (up_shift == 0) {/* result is 64 bit */
1077 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
1078 0x80000000);
1079 } else { /* result is 32 bit */
1080 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
1081 0x80000000);
1083 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
1084 /* calc sv overflow bit */
1085 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
1087 /* calc av overflow bit */
1088 if (up_shift == 0) {
1089 tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
1090 tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
1091 } else {
1092 tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
1093 tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
1095 /* calc sav overflow bit */
1096 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
1097 tcg_temp_free(temp);
1098 tcg_temp_free_i64(temp_64);
1099 tcg_temp_free_i64(temp2_64);
1102 static void
1103 gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
1105 TCGv temp = tcg_temp_new();
1106 if (n == 0) {
1107 tcg_gen_mul_tl(ret, arg1, arg2);
1108 } else { /* n is expected to be 1 */
1109 tcg_gen_mul_tl(ret, arg1, arg2);
1110 tcg_gen_shli_tl(ret, ret, 1);
1111 /* catch special case r1 = r2 = 0x8000 */
1112 tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000);
1113 tcg_gen_sub_tl(ret, ret, temp);
1115 /* reset v bit */
1116 tcg_gen_movi_tl(cpu_PSW_V, 0);
1117 /* calc av overflow bit */
1118 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
1119 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
1120 /* calc sav overflow bit */
1121 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
1123 tcg_temp_free(temp);
1126 static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
1128 TCGv temp = tcg_temp_new();
1129 if (n == 0) {
1130 tcg_gen_mul_tl(ret, arg1, arg2);
1131 tcg_gen_addi_tl(ret, ret, 0x8000);
1132 } else {
1133 tcg_gen_mul_tl(ret, arg1, arg2);
1134 tcg_gen_shli_tl(ret, ret, 1);
1135 tcg_gen_addi_tl(ret, ret, 0x8000);
1136 /* catch special case r1 = r2 = 0x8000 */
1137 tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
1138 tcg_gen_muli_tl(temp, temp, 0x8001);
1139 tcg_gen_sub_tl(ret, ret, temp);
1141 /* reset v bit */
1142 tcg_gen_movi_tl(cpu_PSW_V, 0);
1143 /* calc av overflow bit */
1144 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
1145 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
1146 /* calc sav overflow bit */
1147 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
1148 /* cut halfword off */
1149 tcg_gen_andi_tl(ret, ret, 0xffff0000);
1151 tcg_temp_free(temp);
1154 static inline void
1155 gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
1156 int32_t con)
1158 TCGv temp = tcg_const_i32(con);
1159 TCGv_i64 temp64 = tcg_temp_new_i64();
1160 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
1161 gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, temp);
1162 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
1163 tcg_temp_free(temp);
1164 tcg_temp_free_i64(temp64);
1167 static inline void
1168 gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
1169 int32_t con)
1171 TCGv temp = tcg_const_i32(con);
1172 TCGv_i64 temp64 = tcg_temp_new_i64();
1173 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
1174 gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, temp);
1175 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
1176 tcg_temp_free(temp);
1177 tcg_temp_free_i64(temp64);
1180 static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
1182 TCGv temp = tcg_const_i32(con);
1183 gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
1184 tcg_temp_free(temp);
1187 static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
1189 TCGv temp = tcg_const_i32(con);
1190 gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
1191 tcg_temp_free(temp);
1194 static inline void
1195 gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
1196 int32_t con)
1198 TCGv temp = tcg_const_i32(con);
1199 TCGv_i64 temp64 = tcg_temp_new_i64();
1200 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
1201 gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, temp);
1202 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
1203 tcg_temp_free(temp);
1204 tcg_temp_free_i64(temp64);
1207 static inline void
1208 gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
1209 int32_t con)
1211 TCGv temp = tcg_const_i32(con);
1212 TCGv_i64 temp64 = tcg_temp_new_i64();
1213 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
1214 gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, temp);
1215 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
1216 tcg_temp_free(temp);
1217 tcg_temp_free_i64(temp64);
1220 static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
1222 TCGv sat_neg = tcg_const_i32(low);
1223 TCGv temp = tcg_const_i32(up);
1225 /* sat_neg = (arg < low ) ? low : arg; */
1226 tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);
1228 /* ret = (sat_neg > up ) ? up : sat_neg; */
1229 tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);
1231 tcg_temp_free(sat_neg);
1232 tcg_temp_free(temp);
1235 static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
1237 TCGv temp = tcg_const_i32(up);
1238 /* sat_neg = (arg > up ) ? up : arg; */
1239 tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
1240 tcg_temp_free(temp);
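/* Shift by an immediate count: a positive count shifts left, a negative
   count shifts right logically; a count of -32 yields 0. */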
1243 static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
1245 if (shift_count == -32) {
1246 tcg_gen_movi_tl(ret, 0);
1247 } else if (shift_count >= 0) {
1248 tcg_gen_shli_tl(ret, r1, shift_count);
1249 } else {
1250 tcg_gen_shri_tl(ret, r1, -shift_count);
1254 static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
1256 TCGv temp_low, temp_high;
1258 if (shiftcount == -16) {
1259 tcg_gen_movi_tl(ret, 0);
1260 } else {
1261 temp_high = tcg_temp_new();
1262 temp_low = tcg_temp_new();
1264 tcg_gen_andi_tl(temp_low, r1, 0xffff);
1265 tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
1266 gen_shi(temp_low, temp_low, shiftcount);
1267 gen_shi(ret, temp_high, shiftcount);
1268 tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);
1270 tcg_temp_free(temp_low);
1271 tcg_temp_free(temp_high);
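/* Arithmetic shift by an immediate count: positive counts shift left and
   set PSW_C from the bits shifted out and PSW_V on signed overflow;
   negative counts shift right arithmetically and set PSW_C from the bits
   shifted out. AV/SAV are updated from the result. */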
1275 static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
1277 uint32_t msk, msk_start;
1278 TCGv temp = tcg_temp_new();
1279 TCGv temp2 = tcg_temp_new();
1280 TCGv t_0 = tcg_const_i32(0);
1282 if (shift_count == 0) {
1283 /* Clear PSW.C and PSW.V */
1284 tcg_gen_movi_tl(cpu_PSW_C, 0);
1285 tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
1286 tcg_gen_mov_tl(ret, r1);
1287 } else if (shift_count == -32) {
1288 /* set PSW.C */
1289 tcg_gen_mov_tl(cpu_PSW_C, r1);
1290 /* fill ret completely with sign bit */
1291 tcg_gen_sari_tl(ret, r1, 31);
1292 /* clear PSW.V */
1293 tcg_gen_movi_tl(cpu_PSW_V, 0);
1294 } else if (shift_count > 0) {
1295 TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
1296 TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);
1298 /* calc carry */
1299 msk_start = 32 - shift_count;
1300 msk = ((1 << shift_count) - 1) << msk_start;
1301 tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
1302 /* calc v/sv bits */
1303 tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
1304 tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
1305 tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
1306 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
1307 /* calc sv */
1308 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
1309 /* do shift */
1310 tcg_gen_shli_tl(ret, r1, shift_count);
1312 tcg_temp_free(t_max);
1313 tcg_temp_free(t_min);
1314 } else {
1315 /* clear PSW.V */
1316 tcg_gen_movi_tl(cpu_PSW_V, 0);
1317 /* calc carry */
1318 msk = (1 << -shift_count) - 1;
1319 tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
1320 /* do shift */
1321 tcg_gen_sari_tl(ret, r1, -shift_count);
1323 /* calc av overflow bit */
1324 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
1325 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
1326 /* calc sav overflow bit */
1327 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
1329 tcg_temp_free(temp);
1330 tcg_temp_free(temp2);
1331 tcg_temp_free(t_0);
1334 static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
1336 gen_helper_sha_ssov(ret, cpu_env, r1, r2);
1339 static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
1341 TCGv temp = tcg_const_i32(con);
1342 gen_shas(ret, r1, temp);
1343 tcg_temp_free(temp);
1346 static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
1348 TCGv low, high;
1350 if (shift_count == 0) {
1351 tcg_gen_mov_tl(ret, r1);
1352 } else if (shift_count > 0) {
1353 low = tcg_temp_new();
1354 high = tcg_temp_new();
1356 tcg_gen_andi_tl(high, r1, 0xffff0000);
1357 tcg_gen_shli_tl(low, r1, shift_count);
1358 tcg_gen_shli_tl(ret, high, shift_count);
1359 tcg_gen_deposit_tl(ret, ret, low, 0, 16);
1361 tcg_temp_free(low);
1362 tcg_temp_free(high);
1363 } else {
1364 low = tcg_temp_new();
1365 high = tcg_temp_new();
1367 tcg_gen_ext16s_tl(low, r1);
1368 tcg_gen_sari_tl(low, low, -shift_count);
1369 tcg_gen_sari_tl(ret, r1, -shift_count);
1370 tcg_gen_deposit_tl(ret, ret, low, 0, 16);
1372 tcg_temp_free(low);
1373 tcg_temp_free(high);
1378 /* ret = {ret[30:0], (r1 cond r2)}; */
1379 static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
1381 TCGv temp = tcg_temp_new();
1382 TCGv temp2 = tcg_temp_new();
1384 tcg_gen_shli_tl(temp, ret, 1);
1385 tcg_gen_setcond_tl(cond, temp2, r1, r2);
1386 tcg_gen_or_tl(ret, temp, temp2);
1388 tcg_temp_free(temp);
1389 tcg_temp_free(temp2);
1392 static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
1394 TCGv temp = tcg_const_i32(con);
1395 gen_sh_cond(cond, ret, r1, temp);
1396 tcg_temp_free(temp);
1399 static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
1401 gen_helper_add_ssov(ret, cpu_env, r1, r2);
1404 static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
1406 TCGv temp = tcg_const_i32(con);
1407 gen_helper_add_ssov(ret, cpu_env, r1, temp);
1408 tcg_temp_free(temp);
1411 static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
1413 TCGv temp = tcg_const_i32(con);
1414 gen_helper_add_suov(ret, cpu_env, r1, temp);
1415 tcg_temp_free(temp);
1418 static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
1420 gen_helper_sub_ssov(ret, cpu_env, r1, r2);
1423 static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
1425 gen_helper_sub_suov(ret, cpu_env, r1, r2);
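/* ret[0] = ret[0] op2 (r1[pos1] op1 r2[pos2]); bits 31:1 of ret are
   left unchanged. */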
1428 static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
1429 int pos1, int pos2,
1430 void(*op1)(TCGv, TCGv, TCGv),
1431 void(*op2)(TCGv, TCGv, TCGv))
1433 TCGv temp1, temp2;
1435 temp1 = tcg_temp_new();
1436 temp2 = tcg_temp_new();
1438 tcg_gen_shri_tl(temp2, r2, pos2);
1439 tcg_gen_shri_tl(temp1, r1, pos1);
1441 (*op1)(temp1, temp1, temp2);
1442 (*op2)(temp1 , ret, temp1);
1444 tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);
1446 tcg_temp_free(temp1);
1447 tcg_temp_free(temp2);
1450 /* ret = r1[pos1] op1 r2[pos2]; */
1451 static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
1452 int pos1, int pos2,
1453 void(*op1)(TCGv, TCGv, TCGv))
1455 TCGv temp1, temp2;
1457 temp1 = tcg_temp_new();
1458 temp2 = tcg_temp_new();
1460 tcg_gen_shri_tl(temp2, r2, pos2);
1461 tcg_gen_shri_tl(temp1, r1, pos1);
1463 (*op1)(ret, temp1, temp2);
1465 tcg_gen_andi_tl(ret, ret, 0x1);
1467 tcg_temp_free(temp1);
1468 tcg_temp_free(temp2);
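/* ret[0] = (r1 cond r2) op ret[0]; bits 31:1 of ret are left unchanged. */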
1471 static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
1472 void(*op)(TCGv, TCGv, TCGv))
1474 TCGv temp = tcg_temp_new();
1475 TCGv temp2 = tcg_temp_new();
1476 /* temp = (arg1 cond arg2 )*/
1477 tcg_gen_setcond_tl(cond, temp, r1, r2);
1478 /* temp2 = ret[0]*/
1479 tcg_gen_andi_tl(temp2, ret, 0x1);
1480 /* temp = temp insn temp2 */
1481 (*op)(temp, temp, temp2);
1482 /* ret = {ret[31:1], temp} */
1483 tcg_gen_deposit_tl(ret, ret, temp, 0, 1);
1485 tcg_temp_free(temp);
1486 tcg_temp_free(temp2);
1489 static inline void
1490 gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
1491 void(*op)(TCGv, TCGv, TCGv))
1493 TCGv temp = tcg_const_i32(con);
1494 gen_accumulating_cond(cond, ret, r1, temp, op);
1495 tcg_temp_free(temp);
1498 /* ret = (r1 cond r2) ? 0xFFFFFFFF : 0x00000000; */
1499 static inline void gen_cond_w(TCGCond cond, TCGv ret, TCGv r1, TCGv r2)
1501 tcg_gen_setcond_tl(cond, ret, r1, r2);
1502 tcg_gen_neg_tl(ret, ret);
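/* ret = 1 if any byte of r1 equals the corresponding byte of con,
   0 otherwise. */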
1505 static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
1507 TCGv b0 = tcg_temp_new();
1508 TCGv b1 = tcg_temp_new();
1509 TCGv b2 = tcg_temp_new();
1510 TCGv b3 = tcg_temp_new();
1512 /* byte 0 */
1513 tcg_gen_andi_tl(b0, r1, 0xff);
1514 tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);
1516 /* byte 1 */
1517 tcg_gen_andi_tl(b1, r1, 0xff00);
1518 tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);
1520 /* byte 2 */
1521 tcg_gen_andi_tl(b2, r1, 0xff0000);
1522 tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);
1524 /* byte 3 */
1525 tcg_gen_andi_tl(b3, r1, 0xff000000);
1526 tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);
1528 /* combine them */
1529 tcg_gen_or_tl(ret, b0, b1);
1530 tcg_gen_or_tl(ret, ret, b2);
1531 tcg_gen_or_tl(ret, ret, b3);
1533 tcg_temp_free(b0);
1534 tcg_temp_free(b1);
1535 tcg_temp_free(b2);
1536 tcg_temp_free(b3);
1539 static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
1541 TCGv h0 = tcg_temp_new();
1542 TCGv h1 = tcg_temp_new();
1544 /* halfword 0 */
1545 tcg_gen_andi_tl(h0, r1, 0xffff);
1546 tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);
1548 /* halfword 1 */
1549 tcg_gen_andi_tl(h1, r1, 0xffff0000);
1550 tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);
1552 /* combine them */
1553 tcg_gen_or_tl(ret, h0, h1);
1555 tcg_temp_free(h0);
1556 tcg_temp_free(h1);
1558 /* mask = ((1 << width) - 1) << pos;
1559 ret = (r1 & ~mask) | ((r2 << pos) & mask); */
1560 static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
1562 TCGv mask = tcg_temp_new();
1563 TCGv temp = tcg_temp_new();
1564 TCGv temp2 = tcg_temp_new();
1566 tcg_gen_movi_tl(mask, 1);
1567 tcg_gen_shl_tl(mask, mask, width);
1568 tcg_gen_subi_tl(mask, mask, 1);
1569 tcg_gen_shl_tl(mask, mask, pos);
1571 tcg_gen_shl_tl(temp, r2, pos);
1572 tcg_gen_and_tl(temp, temp, mask);
1573 tcg_gen_andc_tl(temp2, r1, mask);
1574 tcg_gen_or_tl(ret, temp, temp2);
1576 tcg_temp_free(mask);
1577 tcg_temp_free(temp);
1578 tcg_temp_free(temp2);
1581 static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
1583 TCGv_i64 temp = tcg_temp_new_i64();
1585 gen_helper_bsplit(temp, r1);
1586 tcg_gen_extr_i64_i32(rl, rh, temp);
1588 tcg_temp_free_i64(temp);
1591 static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
1593 TCGv_i64 temp = tcg_temp_new_i64();
1595 gen_helper_unpack(temp, r1);
1596 tcg_gen_extr_i64_i32(rl, rh, temp);
1598 tcg_temp_free_i64(temp);
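/* Divide-step initialisation: the TriCore 1.3.1 feature bit selects
   between the 1.3 and 1.3.1 helper variants; the 64-bit helper result is
   split into the rl/rh register pair. */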
1601 static inline void
1602 gen_dvinit_b(CPUTriCoreState *env, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
1604 TCGv_i64 ret = tcg_temp_new_i64();
1606 if (!tricore_feature(env, TRICORE_FEATURE_131)) {
1607 gen_helper_dvinit_b_13(ret, cpu_env, r1, r2);
1608 } else {
1609 gen_helper_dvinit_b_131(ret, cpu_env, r1, r2);
1611 tcg_gen_extr_i64_i32(rl, rh, ret);
1613 tcg_temp_free_i64(ret);
1616 static inline void
1617 gen_dvinit_h(CPUTriCoreState *env, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
1619 TCGv_i64 ret = tcg_temp_new_i64();
1621 if (!tricore_feature(env, TRICORE_FEATURE_131)) {
1622 gen_helper_dvinit_h_13(ret, cpu_env, r1, r2);
1623 } else {
1624 gen_helper_dvinit_h_131(ret, cpu_env, r1, r2);
1626 tcg_gen_extr_i64_i32(rl, rh, ret);
1628 tcg_temp_free_i64(ret);
1631 static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high)
1633 TCGv temp = tcg_temp_new();
1634 /* calc AV bit */
1635 tcg_gen_add_tl(temp, arg_low, arg_low);
1636 tcg_gen_xor_tl(temp, temp, arg_low);
1637 tcg_gen_add_tl(cpu_PSW_AV, arg_high, arg_high);
1638 tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, arg_high);
1639 tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
1640 /* calc SAV bit */
1641 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
1642 tcg_gen_movi_tl(cpu_PSW_V, 0);
1643 tcg_temp_free(temp);
1646 static void gen_calc_usb_mulr_h(TCGv arg)
1648 TCGv temp = tcg_temp_new();
1649 /* calc AV bit */
1650 tcg_gen_add_tl(temp, arg, arg);
1651 tcg_gen_xor_tl(temp, temp, arg);
1652 tcg_gen_shli_tl(cpu_PSW_AV, temp, 16);
1653 tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
1654 /* calc SAV bit */
1655 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
1656 /* clear V bit */
1657 tcg_gen_movi_tl(cpu_PSW_V, 0);
1658 tcg_temp_free(temp);
1661 /* helpers for generating program flow micro-ops */
1663 static inline void gen_save_pc(target_ulong pc)
1665 tcg_gen_movi_tl(cpu_PC, pc);
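/* Jump to dest: chain directly to the next TB when it lies on the same
   guest page and single-stepping is disabled; otherwise just update the
   PC and return to the main loop. */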
1668 static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
1670 TranslationBlock *tb;
1671 tb = ctx->tb;
1672 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
1673 likely(!ctx->singlestep_enabled)) {
1674 tcg_gen_goto_tb(n);
1675 gen_save_pc(dest);
1676 tcg_gen_exit_tb((uintptr_t)tb + n);
1677 } else {
1678 gen_save_pc(dest);
1679 if (ctx->singlestep_enabled) {
1680 /* raise exception debug */
1682 tcg_gen_exit_tb(0);
1686 static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
1687 TCGv r2, int16_t address)
1689 int jumpLabel;
1690 jumpLabel = gen_new_label();
1691 tcg_gen_brcond_tl(cond, r1, r2, jumpLabel);
1693 gen_goto_tb(ctx, 1, ctx->next_pc);
1695 gen_set_label(jumpLabel);
1696 gen_goto_tb(ctx, 0, ctx->pc + address * 2);
1699 static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
1700 int r2, int16_t address)
1702 TCGv temp = tcg_const_i32(r2);
1703 gen_branch_cond(ctx, cond, r1, temp, address);
1704 tcg_temp_free(temp);
1707 static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
1709 int l1;
1710 l1 = gen_new_label();
1712 tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
1713 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
1714 gen_goto_tb(ctx, 1, ctx->pc + offset);
1715 gen_set_label(l1);
1716 gen_goto_tb(ctx, 0, ctx->next_pc);
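/* Decode and emit code for all branch instruction formats (SB, SBC, SBRN,
   SBR, SR, B, BRC, BRN and BRR) and mark the TB as ending in a branch. */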
1719 static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
1720 int r2 , int32_t constant , int32_t offset)
1722 TCGv temp, temp2;
1723 int n;
1725 switch (opc) {
1726 /* SB-format jumps */
1727 case OPC1_16_SB_J:
1728 case OPC1_32_B_J:
1729 gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
1730 break;
1731 case OPC1_32_B_CALL:
1732 case OPC1_16_SB_CALL:
1733 gen_helper_1arg(call, ctx->next_pc);
1734 gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
1735 break;
1736 case OPC1_16_SB_JZ:
1737 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], 0, offset);
1738 break;
1739 case OPC1_16_SB_JNZ:
1740 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], 0, offset);
1741 break;
1742 /* SBC-format jumps */
1743 case OPC1_16_SBC_JEQ:
1744 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant, offset);
1745 break;
1746 case OPC1_16_SBC_JNE:
1747 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], constant, offset);
1748 break;
1749 /* SBRN-format jumps */
1750 case OPC1_16_SBRN_JZ_T:
1751 temp = tcg_temp_new();
1752 tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
1753 gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
1754 tcg_temp_free(temp);
1755 break;
1756 case OPC1_16_SBRN_JNZ_T:
1757 temp = tcg_temp_new();
1758 tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
1759 gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
1760 tcg_temp_free(temp);
1761 break;
1762 /* SBR-format jumps */
1763 case OPC1_16_SBR_JEQ:
1764 gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15],
1765 offset);
1766 break;
1767 case OPC1_16_SBR_JNE:
1768 gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15],
1769 offset);
1770 break;
1771 case OPC1_16_SBR_JNZ:
1772 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], 0, offset);
1773 break;
1774 case OPC1_16_SBR_JNZ_A:
1775 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset);
1776 break;
1777 case OPC1_16_SBR_JGEZ:
1778 gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], 0, offset);
1779 break;
1780 case OPC1_16_SBR_JGTZ:
1781 gen_branch_condi(ctx, TCG_COND_GT, cpu_gpr_d[r1], 0, offset);
1782 break;
1783 case OPC1_16_SBR_JLEZ:
1784 gen_branch_condi(ctx, TCG_COND_LE, cpu_gpr_d[r1], 0, offset);
1785 break;
1786 case OPC1_16_SBR_JLTZ:
1787 gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], 0, offset);
1788 break;
1789 case OPC1_16_SBR_JZ:
1790 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], 0, offset);
1791 break;
1792 case OPC1_16_SBR_JZ_A:
1793 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset);
1794 break;
1795 case OPC1_16_SBR_LOOP:
1796 gen_loop(ctx, r1, offset * 2 - 32);
1797 break;
1798 /* SR-format jumps */
1799 case OPC1_16_SR_JI:
1800 tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
1801 tcg_gen_exit_tb(0);
1802 break;
1803 case OPC2_16_SR_RET:
1804 gen_helper_ret(cpu_env);
1805 tcg_gen_exit_tb(0);
1806 break;
1807 /* B-format */
1808 case OPC1_32_B_CALLA:
1809 gen_helper_1arg(call, ctx->next_pc);
1810 gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
1811 break;
1812 case OPC1_32_B_JLA:
1813 tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
1814 /* fall through */
1815 case OPC1_32_B_JA:
1816 gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
1817 break;
1818 case OPC1_32_B_JL:
1819 tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
1820 gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
1821 break;
1822 /* BRC format */
1823 case OPCM_32_BRC_EQ_NEQ:
1824 if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JEQ) {
1825 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], constant, offset);
1826 } else {
1827 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], constant, offset);
1829 break;
1830 case OPCM_32_BRC_GE:
1831 if (MASK_OP_BRC_OP2(ctx->opcode) == OP2_32_BRC_JGE) {
1832 gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], constant, offset);
1833 } else {
1834 constant = MASK_OP_BRC_CONST4(ctx->opcode);
1835 gen_branch_condi(ctx, TCG_COND_GEU, cpu_gpr_d[r1], constant,
1836 offset);
1838 break;
1839 case OPCM_32_BRC_JLT:
1840 if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JLT) {
1841 gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], constant, offset);
1842 } else {
1843 constant = MASK_OP_BRC_CONST4(ctx->opcode);
1844 gen_branch_condi(ctx, TCG_COND_LTU, cpu_gpr_d[r1], constant,
1845 offset);
1847 break;
1848 case OPCM_32_BRC_JNE:
1849 temp = tcg_temp_new();
1850 if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) {
1851 tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
1852 /* subi is unconditional */
1853 tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
1854 gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
1855 } else {
1856 tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
1857 /* addi is unconditional */
1858 tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
1859 gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
1861 tcg_temp_free(temp);
1862 break;
1863 /* BRN format */
1864 case OPCM_32_BRN_JTT:
1865 n = MASK_OP_BRN_N(ctx->opcode);
1867 temp = tcg_temp_new();
1868 tcg_gen_andi_tl(temp, cpu_gpr_d[r1], (1 << n));
1870 if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) {
1871 gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
1872 } else {
1873 gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
1875 tcg_temp_free(temp);
1876 break;
1877 /* BRR Format */
1878 case OPCM_32_BRR_EQ_NEQ:
1879 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ) {
1880 gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2],
1881 offset);
1882 } else {
1883 gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
1884 offset);
1886 break;
1887 case OPCM_32_BRR_ADDR_EQ_NEQ:
1888 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ_A) {
1889 gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_a[r1], cpu_gpr_a[r2],
1890 offset);
1891 } else {
1892 gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_a[r1], cpu_gpr_a[r2],
1893 offset);
1895 break;
1896 case OPCM_32_BRR_GE:
1897 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JGE) {
1898 gen_branch_cond(ctx, TCG_COND_GE, cpu_gpr_d[r1], cpu_gpr_d[r2],
1899 offset);
1900 } else {
1901 gen_branch_cond(ctx, TCG_COND_GEU, cpu_gpr_d[r1], cpu_gpr_d[r2],
1902 offset);
1904 break;
1905 case OPCM_32_BRR_JLT:
1906 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JLT) {
1907 gen_branch_cond(ctx, TCG_COND_LT, cpu_gpr_d[r1], cpu_gpr_d[r2],
1908 offset);
1909 } else {
1910 gen_branch_cond(ctx, TCG_COND_LTU, cpu_gpr_d[r1], cpu_gpr_d[r2],
1911 offset);
1913 break;
1914 case OPCM_32_BRR_LOOP:
1915 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_LOOP) {
1916 gen_loop(ctx, r1, offset * 2);
1917 } else {
1918 /* OPC2_32_BRR_LOOPU */
1919 gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
1921 break;
1922 case OPCM_32_BRR_JNE:
1923 temp = tcg_temp_new();
1924 temp2 = tcg_temp_new();
1925 if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) {
1926 tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
1927 /* also save r2, in case of r1 == r2, so r2 is not decremented */
1928 tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
1929 /* subi is unconditional */
1930 tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
1931 gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
1932 } else {
1933 tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
1934 /* also save r2, in case of r1 == r2, so r2 is not decremented */
1935 tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
1936 /* addi is unconditional */
1937 tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
1938 gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
1940 tcg_temp_free(temp);
1941 tcg_temp_free(temp2);
1942 break;
1943 case OPCM_32_BRR_JNZ:
1944 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNZ_A) {
1945 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset);
1946 } else {
1947 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset);
1949 break;
1950 default:
1951         printf("Branch Error at " TARGET_FMT_lx "\n", ctx->pc);
1953 ctx->bstate = BS_BRANCH;
1958 /* Functions for decoding instructions */
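/* SRC format: one data/address register plus a 4-bit constant, sign-extended
   for most operations (MOV.A re-reads it zero-extended). */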
1961 static void decode_src_opc(DisasContext *ctx, int op1)
1963 int r1;
1964 int32_t const4;
1965 TCGv temp, temp2;
1967 r1 = MASK_OP_SRC_S1D(ctx->opcode);
1968 const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode);
1970 switch (op1) {
1971 case OPC1_16_SRC_ADD:
1972 gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
1973 break;
1974 case OPC1_16_SRC_ADD_A15:
1975 gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[15], const4);
1976 break;
1977 case OPC1_16_SRC_ADD_15A:
1978 gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4);
1979 break;
1980 case OPC1_16_SRC_ADD_A:
1981 tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4);
1982 break;
1983 case OPC1_16_SRC_CADD:
1984 gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
1985 cpu_gpr_d[15]);
1986 break;
1987 case OPC1_16_SRC_CADDN:
1988 gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
1989 cpu_gpr_d[15]);
1990 break;
1991 case OPC1_16_SRC_CMOV:
1992 temp = tcg_const_tl(0);
1993 temp2 = tcg_const_tl(const4);
1994 tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
1995 temp2, cpu_gpr_d[r1]);
1996 tcg_temp_free(temp);
1997 tcg_temp_free(temp2);
1998 break;
1999 case OPC1_16_SRC_CMOVN:
2000 temp = tcg_const_tl(0);
2001 temp2 = tcg_const_tl(const4);
2002 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
2003 temp2, cpu_gpr_d[r1]);
2004 tcg_temp_free(temp);
2005 tcg_temp_free(temp2);
2006 break;
2007 case OPC1_16_SRC_EQ:
2008 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
2009 const4);
2010 break;
2011 case OPC1_16_SRC_LT:
2012 tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
2013 const4);
2014 break;
2015 case OPC1_16_SRC_MOV:
2016 tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
2017 break;
2018 case OPC1_16_SRC_MOV_A:
2019 const4 = MASK_OP_SRC_CONST4(ctx->opcode);
2020 tcg_gen_movi_tl(cpu_gpr_a[r1], const4);
2021 break;
2022 case OPC1_16_SRC_SH:
2023 gen_shi(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
2024 break;
2025 case OPC1_16_SRC_SHA:
2026 gen_shaci(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
2027 break;
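/* SRR format: 16-bit two-register operations; D[15] serves as the implicit
   operand or condition for the *_A15/15A and CMOV variants. */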
2031 static void decode_srr_opc(DisasContext *ctx, int op1)
2033 int r1, r2;
2034 TCGv temp;
2036 r1 = MASK_OP_SRR_S1D(ctx->opcode);
2037 r2 = MASK_OP_SRR_S2(ctx->opcode);
2039 switch (op1) {
2040 case OPC1_16_SRR_ADD:
2041 gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
2042 break;
2043 case OPC1_16_SRR_ADD_A15:
2044 gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
2045 break;
2046 case OPC1_16_SRR_ADD_15A:
2047 gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
2048 break;
2049 case OPC1_16_SRR_ADD_A:
2050 tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
2051 break;
2052 case OPC1_16_SRR_ADDS:
2053 gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
2054 break;
2055 case OPC1_16_SRR_AND:
2056 tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
2057 break;
2058 case OPC1_16_SRR_CMOV:
2059 temp = tcg_const_tl(0);
2060 tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
2061 cpu_gpr_d[r2], cpu_gpr_d[r1]);
2062 tcg_temp_free(temp);
2063 break;
2064 case OPC1_16_SRR_CMOVN:
2065 temp = tcg_const_tl(0);
2066 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
2067 cpu_gpr_d[r2], cpu_gpr_d[r1]);
2068 tcg_temp_free(temp);
2069 break;
2070 case OPC1_16_SRR_EQ:
2071 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
2072 cpu_gpr_d[r2]);
2073 break;
2074 case OPC1_16_SRR_LT:
2075 tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
2076 cpu_gpr_d[r2]);
2077 break;
2078 case OPC1_16_SRR_MOV:
2079 tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]);
2080 break;
2081 case OPC1_16_SRR_MOV_A:
2082 tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]);
2083 break;
2084 case OPC1_16_SRR_MOV_AA:
2085 tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]);
2086 break;
2087 case OPC1_16_SRR_MOV_D:
2088 tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]);
2089 break;
2090 case OPC1_16_SRR_MUL:
2091 gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
2092 break;
2093 case OPC1_16_SRR_OR:
2094 tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
2095 break;
2096 case OPC1_16_SRR_SUB:
2097 gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
2098 break;
2099 case OPC1_16_SRR_SUB_A15B:
2100 gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
2101 break;
2102 case OPC1_16_SRR_SUB_15AB:
2103 gen_sub_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
2104 break;
2105 case OPC1_16_SRR_SUBS:
2106 gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
2107 break;
2108 case OPC1_16_SRR_XOR:
2109 tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
2110 break;
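/* SSR format: 16-bit stores of D[a]/A[a] to the address in A[b]; the _POSTINC
   variants advance A[b] by the access size afterwards. */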
2114 static void decode_ssr_opc(DisasContext *ctx, int op1)
2116 int r1, r2;
2118 r1 = MASK_OP_SSR_S1(ctx->opcode);
2119 r2 = MASK_OP_SSR_S2(ctx->opcode);
2121 switch (op1) {
2122 case OPC1_16_SSR_ST_A:
2123 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
2124 break;
2125 case OPC1_16_SSR_ST_A_POSTINC:
2126 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
2127 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
2128 break;
2129 case OPC1_16_SSR_ST_B:
2130 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
2131 break;
2132 case OPC1_16_SSR_ST_B_POSTINC:
2133 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
2134 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
2135 break;
2136 case OPC1_16_SSR_ST_H:
2137 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
2138 break;
2139 case OPC1_16_SSR_ST_H_POSTINC:
2140 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
2141 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
2142 break;
2143 case OPC1_16_SSR_ST_W:
2144 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
2145 break;
2146 case OPC1_16_SSR_ST_W_POSTINC:
2147 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
2148 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
2149 break;
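/* SC format: 16-bit instructions with an 8-bit constant, operating implicitly
   on D[15], A[15] and the stack pointer A[10]. */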
2153 static void decode_sc_opc(DisasContext *ctx, int op1)
2155 int32_t const16;
2157 const16 = MASK_OP_SC_CONST8(ctx->opcode);
2159 switch (op1) {
2160 case OPC1_16_SC_AND:
2161 tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
2162 break;
2163 case OPC1_16_SC_BISR:
2164 gen_helper_1arg(bisr, const16 & 0xff);
2165 break;
2166 case OPC1_16_SC_LD_A:
2167 gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
2168 break;
2169 case OPC1_16_SC_LD_W:
2170 gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
2171 break;
2172 case OPC1_16_SC_MOV:
2173 tcg_gen_movi_tl(cpu_gpr_d[15], const16);
2174 break;
2175 case OPC1_16_SC_OR:
2176 tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
2177 break;
2178 case OPC1_16_SC_ST_A:
2179 gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
2180 break;
2181 case OPC1_16_SC_ST_W:
2182 gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
2183 break;
2184 case OPC1_16_SC_SUB_A:
2185 tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16);
2186 break;
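/* SLR format: 16-bit loads from the address in A[b] into D[c]/A[c], with
   optional post-increment of A[b]. */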
2190 static void decode_slr_opc(DisasContext *ctx, int op1)
2192 int r1, r2;
2194 r1 = MASK_OP_SLR_D(ctx->opcode);
2195 r2 = MASK_OP_SLR_S2(ctx->opcode);
2197 switch (op1) {
2198 /* SLR-format */
2199 case OPC1_16_SLR_LD_A:
2200 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
2201 break;
2202 case OPC1_16_SLR_LD_A_POSTINC:
2203 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
2204 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
2205 break;
2206 case OPC1_16_SLR_LD_BU:
2207 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
2208 break;
2209 case OPC1_16_SLR_LD_BU_POSTINC:
2210 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
2211 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
2212 break;
2213 case OPC1_16_SLR_LD_H:
2214 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
2215 break;
2216 case OPC1_16_SLR_LD_H_POSTINC:
2217 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
2218 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
2219 break;
2220 case OPC1_16_SLR_LD_W:
2221         tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
2222 break;
2223 case OPC1_16_SLR_LD_W_POSTINC:
2224         tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
2225 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
2226 break;
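/* SRO format: loads/stores between D[15]/A[15] and A[b] plus a 4-bit offset
   scaled by the access size. */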
2230 static void decode_sro_opc(DisasContext *ctx, int op1)
2232 int r2;
2233 int32_t address;
2235 r2 = MASK_OP_SRO_S2(ctx->opcode);
2236 address = MASK_OP_SRO_OFF4(ctx->opcode);
2238 /* SRO-format */
2239 switch (op1) {
2240 case OPC1_16_SRO_LD_A:
2241 gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
2242 break;
2243 case OPC1_16_SRO_LD_BU:
2244 gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
2245 break;
2246 case OPC1_16_SRO_LD_H:
2247         gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
2248 break;
2249 case OPC1_16_SRO_LD_W:
2250 gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
2251 break;
2252 case OPC1_16_SRO_ST_A:
2253 gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
2254 break;
2255 case OPC1_16_SRO_ST_B:
2256 gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
2257 break;
2258 case OPC1_16_SRO_ST_H:
2259 gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
2260 break;
2261 case OPC1_16_SRO_ST_W:
2262 gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
2263 break;
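/* SR format, system group: NOP, RET, RFE and DEBUG. */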
2267 static void decode_sr_system(CPUTriCoreState *env, DisasContext *ctx)
2269 uint32_t op2;
2270 op2 = MASK_OP_SR_OP2(ctx->opcode);
2272 switch (op2) {
2273 case OPC2_16_SR_NOP:
2274 break;
2275 case OPC2_16_SR_RET:
2276 gen_compute_branch(ctx, op2, 0, 0, 0, 0);
2277 break;
2278 case OPC2_16_SR_RFE:
2279 gen_helper_rfe(cpu_env);
2280 tcg_gen_exit_tb(0);
2281 ctx->bstate = BS_BRANCH;
2282 break;
2283 case OPC2_16_SR_DEBUG:
2284 /* raise EXCP_DEBUG */
2285 break;
2289 static void decode_sr_accu(CPUTriCoreState *env, DisasContext *ctx)
2291 uint32_t op2;
2292 uint32_t r1;
2293 TCGv temp;
2295 r1 = MASK_OP_SR_S1D(ctx->opcode);
2296 op2 = MASK_OP_SR_OP2(ctx->opcode);
2298 switch (op2) {
2299 case OPC2_16_SR_RSUB:
2300 /* overflow only if r1 = -0x80000000 */
2301 temp = tcg_const_i32(-0x80000000);
2302 /* calc V bit */
2303 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp);
2304 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
2305 /* calc SV bit */
2306 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
2307 /* sub */
2308 tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
2309 /* calc av */
2310 tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
2311 tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
2312 /* calc sav */
2313 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
2314 tcg_temp_free(temp);
2315 break;
2316 case OPC2_16_SR_SAT_B:
2317 gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80);
2318 break;
2319 case OPC2_16_SR_SAT_BU:
2320 gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xff);
2321 break;
2322 case OPC2_16_SR_SAT_H:
2323 gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7fff, -0x8000);
2324 break;
2325 case OPC2_16_SR_SAT_HU:
2326 gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xffff);
2327 break;
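/* Top-level decoder for the 16-bit opcodes: dispatches on the major opcode,
   decoding the simple formats inline and calling the per-format helpers above
   for the rest. */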
2331 static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
2333 int op1;
2334 int r1, r2;
2335 int32_t const16;
2336 int32_t address;
2337 TCGv temp;
2339 op1 = MASK_OP_MAJOR(ctx->opcode);
2341     /* handle the ADDSC.A opcode, which is only 6 bits long */
2342 if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) {
2343 op1 = OPC1_16_SRRS_ADDSC_A;
2346 switch (op1) {
2347 case OPC1_16_SRC_ADD:
2348 case OPC1_16_SRC_ADD_A15:
2349 case OPC1_16_SRC_ADD_15A:
2350 case OPC1_16_SRC_ADD_A:
2351 case OPC1_16_SRC_CADD:
2352 case OPC1_16_SRC_CADDN:
2353 case OPC1_16_SRC_CMOV:
2354 case OPC1_16_SRC_CMOVN:
2355 case OPC1_16_SRC_EQ:
2356 case OPC1_16_SRC_LT:
2357 case OPC1_16_SRC_MOV:
2358 case OPC1_16_SRC_MOV_A:
2359 case OPC1_16_SRC_SH:
2360 case OPC1_16_SRC_SHA:
2361 decode_src_opc(ctx, op1);
2362 break;
2363 /* SRR-format */
2364 case OPC1_16_SRR_ADD:
2365 case OPC1_16_SRR_ADD_A15:
2366 case OPC1_16_SRR_ADD_15A:
2367 case OPC1_16_SRR_ADD_A:
2368 case OPC1_16_SRR_ADDS:
2369 case OPC1_16_SRR_AND:
2370 case OPC1_16_SRR_CMOV:
2371 case OPC1_16_SRR_CMOVN:
2372 case OPC1_16_SRR_EQ:
2373 case OPC1_16_SRR_LT:
2374 case OPC1_16_SRR_MOV:
2375 case OPC1_16_SRR_MOV_A:
2376 case OPC1_16_SRR_MOV_AA:
2377 case OPC1_16_SRR_MOV_D:
2378 case OPC1_16_SRR_MUL:
2379 case OPC1_16_SRR_OR:
2380 case OPC1_16_SRR_SUB:
2381 case OPC1_16_SRR_SUB_A15B:
2382 case OPC1_16_SRR_SUB_15AB:
2383 case OPC1_16_SRR_SUBS:
2384 case OPC1_16_SRR_XOR:
2385 decode_srr_opc(ctx, op1);
2386 break;
2387 /* SSR-format */
2388 case OPC1_16_SSR_ST_A:
2389 case OPC1_16_SSR_ST_A_POSTINC:
2390 case OPC1_16_SSR_ST_B:
2391 case OPC1_16_SSR_ST_B_POSTINC:
2392 case OPC1_16_SSR_ST_H:
2393 case OPC1_16_SSR_ST_H_POSTINC:
2394 case OPC1_16_SSR_ST_W:
2395 case OPC1_16_SSR_ST_W_POSTINC:
2396 decode_ssr_opc(ctx, op1);
2397 break;
2398 /* SRRS-format */
2399 case OPC1_16_SRRS_ADDSC_A:
2400 r2 = MASK_OP_SRRS_S2(ctx->opcode);
2401 r1 = MASK_OP_SRRS_S1D(ctx->opcode);
2402 const16 = MASK_OP_SRRS_N(ctx->opcode);
2403 temp = tcg_temp_new();
2404 tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16);
2405 tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
2406 tcg_temp_free(temp);
2407 break;
2408 /* SLRO-format */
2409 case OPC1_16_SLRO_LD_A:
2410 r1 = MASK_OP_SLRO_D(ctx->opcode);
2411 const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
2412 gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
2413 break;
2414 case OPC1_16_SLRO_LD_BU:
2415 r1 = MASK_OP_SLRO_D(ctx->opcode);
2416 const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
2417 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
2418 break;
2419 case OPC1_16_SLRO_LD_H:
2420 r1 = MASK_OP_SLRO_D(ctx->opcode);
2421 const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
2422 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
2423 break;
2424 case OPC1_16_SLRO_LD_W:
2425 r1 = MASK_OP_SLRO_D(ctx->opcode);
2426 const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
2427 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
2428 break;
2429 /* SB-format */
2430 case OPC1_16_SB_CALL:
2431 case OPC1_16_SB_J:
2432 case OPC1_16_SB_JNZ:
2433 case OPC1_16_SB_JZ:
2434 address = MASK_OP_SB_DISP8_SEXT(ctx->opcode);
2435 gen_compute_branch(ctx, op1, 0, 0, 0, address);
2436 break;
2437 /* SBC-format */
2438 case OPC1_16_SBC_JEQ:
2439 case OPC1_16_SBC_JNE:
2440 address = MASK_OP_SBC_DISP4(ctx->opcode);
2441 const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode);
2442 gen_compute_branch(ctx, op1, 0, 0, const16, address);
2443 break;
2444 /* SBRN-format */
2445 case OPC1_16_SBRN_JNZ_T:
2446 case OPC1_16_SBRN_JZ_T:
2447 address = MASK_OP_SBRN_DISP4(ctx->opcode);
2448 const16 = MASK_OP_SBRN_N(ctx->opcode);
2449 gen_compute_branch(ctx, op1, 0, 0, const16, address);
2450 break;
2451 /* SBR-format */
2452 case OPC1_16_SBR_JEQ:
2453 case OPC1_16_SBR_JGEZ:
2454 case OPC1_16_SBR_JGTZ:
2455 case OPC1_16_SBR_JLEZ:
2456 case OPC1_16_SBR_JLTZ:
2457 case OPC1_16_SBR_JNE:
2458 case OPC1_16_SBR_JNZ:
2459 case OPC1_16_SBR_JNZ_A:
2460 case OPC1_16_SBR_JZ:
2461 case OPC1_16_SBR_JZ_A:
2462 case OPC1_16_SBR_LOOP:
2463 r1 = MASK_OP_SBR_S2(ctx->opcode);
2464 address = MASK_OP_SBR_DISP4(ctx->opcode);
2465 gen_compute_branch(ctx, op1, r1, 0, 0, address);
2466 break;
2467 /* SC-format */
2468 case OPC1_16_SC_AND:
2469 case OPC1_16_SC_BISR:
2470 case OPC1_16_SC_LD_A:
2471 case OPC1_16_SC_LD_W:
2472 case OPC1_16_SC_MOV:
2473 case OPC1_16_SC_OR:
2474 case OPC1_16_SC_ST_A:
2475 case OPC1_16_SC_ST_W:
2476 case OPC1_16_SC_SUB_A:
2477 decode_sc_opc(ctx, op1);
2478 break;
2479 /* SLR-format */
2480 case OPC1_16_SLR_LD_A:
2481 case OPC1_16_SLR_LD_A_POSTINC:
2482 case OPC1_16_SLR_LD_BU:
2483 case OPC1_16_SLR_LD_BU_POSTINC:
2484 case OPC1_16_SLR_LD_H:
2485 case OPC1_16_SLR_LD_H_POSTINC:
2486 case OPC1_16_SLR_LD_W:
2487 case OPC1_16_SLR_LD_W_POSTINC:
2488 decode_slr_opc(ctx, op1);
2489 break;
2490 /* SRO-format */
2491 case OPC1_16_SRO_LD_A:
2492 case OPC1_16_SRO_LD_BU:
2493 case OPC1_16_SRO_LD_H:
2494 case OPC1_16_SRO_LD_W:
2495 case OPC1_16_SRO_ST_A:
2496 case OPC1_16_SRO_ST_B:
2497 case OPC1_16_SRO_ST_H:
2498 case OPC1_16_SRO_ST_W:
2499 decode_sro_opc(ctx, op1);
2500 break;
2501 /* SSRO-format */
2502 case OPC1_16_SSRO_ST_A:
2503 r1 = MASK_OP_SSRO_S1(ctx->opcode);
2504 const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
2505 gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
2506 break;
2507 case OPC1_16_SSRO_ST_B:
2508 r1 = MASK_OP_SSRO_S1(ctx->opcode);
2509 const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
2510 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
2511 break;
2512 case OPC1_16_SSRO_ST_H:
2513 r1 = MASK_OP_SSRO_S1(ctx->opcode);
2514 const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
2515 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
2516 break;
2517 case OPC1_16_SSRO_ST_W:
2518 r1 = MASK_OP_SSRO_S1(ctx->opcode);
2519 const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
2520 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
2521 break;
2522 /* SR-format */
2523 case OPCM_16_SR_SYSTEM:
2524 decode_sr_system(env, ctx);
2525 break;
2526 case OPCM_16_SR_ACCU:
2527 decode_sr_accu(env, ctx);
2528 break;
2529 case OPC1_16_SR_JI:
2530 r1 = MASK_OP_SR_S1D(ctx->opcode);
2531 gen_compute_branch(ctx, op1, r1, 0, 0, 0);
2532 break;
2533 case OPC1_16_SR_NOT:
2534 r1 = MASK_OP_SR_S1D(ctx->opcode);
2535 tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
2536 break;
2541 /* 32 bit instructions */
2544 /* ABS-format */
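/* ABS format instructions address memory through an 18-bit absolute offset,
   expanded by EA_ABS_FORMAT() into the full effective address. */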
2545 static void decode_abs_ldw(CPUTriCoreState *env, DisasContext *ctx)
2547 int32_t op2;
2548 int32_t r1;
2549 uint32_t address;
2550 TCGv temp;
2552 r1 = MASK_OP_ABS_S1D(ctx->opcode);
2553 address = MASK_OP_ABS_OFF18(ctx->opcode);
2554 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2556 temp = tcg_const_i32(EA_ABS_FORMAT(address));
2558 switch (op2) {
2559 case OPC2_32_ABS_LD_A:
2560 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
2561 break;
2562 case OPC2_32_ABS_LD_D:
2563 gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
2564 break;
2565 case OPC2_32_ABS_LD_DA:
2566 gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
2567 break;
2568 case OPC2_32_ABS_LD_W:
2569 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
2570 break;
2573 tcg_temp_free(temp);
2576 static void decode_abs_ldb(CPUTriCoreState *env, DisasContext *ctx)
2578 int32_t op2;
2579 int32_t r1;
2580 uint32_t address;
2581 TCGv temp;
2583 r1 = MASK_OP_ABS_S1D(ctx->opcode);
2584 address = MASK_OP_ABS_OFF18(ctx->opcode);
2585 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2587 temp = tcg_const_i32(EA_ABS_FORMAT(address));
2589 switch (op2) {
2590 case OPC2_32_ABS_LD_B:
2591 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB);
2592 break;
2593 case OPC2_32_ABS_LD_BU:
2594 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
2595 break;
2596 case OPC2_32_ABS_LD_H:
2597 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW);
2598 break;
2599 case OPC2_32_ABS_LD_HU:
2600 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
2601 break;
2604 tcg_temp_free(temp);
2607 static void decode_abs_ldst_swap(CPUTriCoreState *env, DisasContext *ctx)
2609 int32_t op2;
2610 int32_t r1;
2611 uint32_t address;
2612 TCGv temp;
2614 r1 = MASK_OP_ABS_S1D(ctx->opcode);
2615 address = MASK_OP_ABS_OFF18(ctx->opcode);
2616 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2618 temp = tcg_const_i32(EA_ABS_FORMAT(address));
2620 switch (op2) {
2621 case OPC2_32_ABS_LDMST:
2622 gen_ldmst(ctx, r1, temp);
2623 break;
2624 case OPC2_32_ABS_SWAP_W:
2625 gen_swap(ctx, r1, temp);
2626 break;
2629 tcg_temp_free(temp);
2632 static void decode_abs_ldst_context(CPUTriCoreState *env, DisasContext *ctx)
2634 uint32_t op2;
2635 int32_t off18;
2637 off18 = MASK_OP_ABS_OFF18(ctx->opcode);
2638 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2640 switch (op2) {
2641 case OPC2_32_ABS_LDLCX:
2642 gen_helper_1arg(ldlcx, EA_ABS_FORMAT(off18));
2643 break;
2644 case OPC2_32_ABS_LDUCX:
2645 gen_helper_1arg(lducx, EA_ABS_FORMAT(off18));
2646 break;
2647 case OPC2_32_ABS_STLCX:
2648 gen_helper_1arg(stlcx, EA_ABS_FORMAT(off18));
2649 break;
2650 case OPC2_32_ABS_STUCX:
2651 gen_helper_1arg(stucx, EA_ABS_FORMAT(off18));
2652 break;
2656 static void decode_abs_store(CPUTriCoreState *env, DisasContext *ctx)
2658 int32_t op2;
2659 int32_t r1;
2660 uint32_t address;
2661 TCGv temp;
2663 r1 = MASK_OP_ABS_S1D(ctx->opcode);
2664 address = MASK_OP_ABS_OFF18(ctx->opcode);
2665 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2667 temp = tcg_const_i32(EA_ABS_FORMAT(address));
2669 switch (op2) {
2670 case OPC2_32_ABS_ST_A:
2671 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
2672 break;
2673 case OPC2_32_ABS_ST_D:
2674 gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
2675 break;
2676 case OPC2_32_ABS_ST_DA:
2677 gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
2678 break;
2679 case OPC2_32_ABS_ST_W:
2680 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
2681 break;
2684 tcg_temp_free(temp);
2687 static void decode_abs_storeb_h(CPUTriCoreState *env, DisasContext *ctx)
2689 int32_t op2;
2690 int32_t r1;
2691 uint32_t address;
2692 TCGv temp;
2694 r1 = MASK_OP_ABS_S1D(ctx->opcode);
2695 address = MASK_OP_ABS_OFF18(ctx->opcode);
2696 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2698 temp = tcg_const_i32(EA_ABS_FORMAT(address));
2700 switch (op2) {
2701 case OPC2_32_ABS_ST_B:
2702 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
2703 break;
2704 case OPC2_32_ABS_ST_H:
2705 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
2706 break;
2708 tcg_temp_free(temp);
2711 /* Bit-format */
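/* BIT format: single-bit operands D[a][pos1] and D[b][pos2], combined by
   gen_bit_1op/gen_bit_2op with the given TCG operations. */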
2713 static void decode_bit_andacc(CPUTriCoreState *env, DisasContext *ctx)
2715 uint32_t op2;
2716 int r1, r2, r3;
2717 int pos1, pos2;
2719 r1 = MASK_OP_BIT_S1(ctx->opcode);
2720 r2 = MASK_OP_BIT_S2(ctx->opcode);
2721 r3 = MASK_OP_BIT_D(ctx->opcode);
2722 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2723 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2724 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2727 switch (op2) {
2728 case OPC2_32_BIT_AND_AND_T:
2729 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2730 pos1, pos2, &tcg_gen_and_tl, &tcg_gen_and_tl);
2731 break;
2732 case OPC2_32_BIT_AND_ANDN_T:
2733 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2734 pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
2735 break;
2736 case OPC2_32_BIT_AND_NOR_T:
2737 if (TCG_TARGET_HAS_andc_i32) {
2738 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2739 pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
2740 } else {
2741 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2742 pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_and_tl);
2744 break;
2745 case OPC2_32_BIT_AND_OR_T:
2746 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2747 pos1, pos2, &tcg_gen_or_tl, &tcg_gen_and_tl);
2748 break;
2752 static void decode_bit_logical_t(CPUTriCoreState *env, DisasContext *ctx)
2754 uint32_t op2;
2755 int r1, r2, r3;
2756 int pos1, pos2;
2757 r1 = MASK_OP_BIT_S1(ctx->opcode);
2758 r2 = MASK_OP_BIT_S2(ctx->opcode);
2759 r3 = MASK_OP_BIT_D(ctx->opcode);
2760 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2761 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2762 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2764 switch (op2) {
2765 case OPC2_32_BIT_AND_T:
2766 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2767 pos1, pos2, &tcg_gen_and_tl);
2768 break;
2769 case OPC2_32_BIT_ANDN_T:
2770 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2771 pos1, pos2, &tcg_gen_andc_tl);
2772 break;
2773 case OPC2_32_BIT_NOR_T:
2774 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2775 pos1, pos2, &tcg_gen_nor_tl);
2776 break;
2777 case OPC2_32_BIT_OR_T:
2778 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2779 pos1, pos2, &tcg_gen_or_tl);
2780 break;
2784 static void decode_bit_insert(CPUTriCoreState *env, DisasContext *ctx)
2786 uint32_t op2;
2787 int r1, r2, r3;
2788 int pos1, pos2;
2789 TCGv temp;
2790 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2791 r1 = MASK_OP_BIT_S1(ctx->opcode);
2792 r2 = MASK_OP_BIT_S2(ctx->opcode);
2793 r3 = MASK_OP_BIT_D(ctx->opcode);
2794 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2795 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2797 temp = tcg_temp_new();
2799 tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2);
2800 if (op2 == OPC2_32_BIT_INSN_T) {
2801 tcg_gen_not_tl(temp, temp);
2803 tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
2804 tcg_temp_free(temp);
2807 static void decode_bit_logical_t2(CPUTriCoreState *env, DisasContext *ctx)
2809 uint32_t op2;
2811 int r1, r2, r3;
2812 int pos1, pos2;
2814 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2815 r1 = MASK_OP_BIT_S1(ctx->opcode);
2816 r2 = MASK_OP_BIT_S2(ctx->opcode);
2817 r3 = MASK_OP_BIT_D(ctx->opcode);
2818 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2819 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2821 switch (op2) {
2822 case OPC2_32_BIT_NAND_T:
2823 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2824 pos1, pos2, &tcg_gen_nand_tl);
2825 break;
2826 case OPC2_32_BIT_ORN_T:
2827 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2828 pos1, pos2, &tcg_gen_orc_tl);
2829 break;
2830 case OPC2_32_BIT_XNOR_T:
2831 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2832 pos1, pos2, &tcg_gen_eqv_tl);
2833 break;
2834 case OPC2_32_BIT_XOR_T:
2835 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2836 pos1, pos2, &tcg_gen_xor_tl);
2837 break;
2841 static void decode_bit_orand(CPUTriCoreState *env, DisasContext *ctx)
2843 uint32_t op2;
2845 int r1, r2, r3;
2846 int pos1, pos2;
2848 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2849 r1 = MASK_OP_BIT_S1(ctx->opcode);
2850 r2 = MASK_OP_BIT_S2(ctx->opcode);
2851 r3 = MASK_OP_BIT_D(ctx->opcode);
2852 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2853 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2855 switch (op2) {
2856 case OPC2_32_BIT_OR_AND_T:
2857 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2858 pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl);
2859 break;
2860 case OPC2_32_BIT_OR_ANDN_T:
2861 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2862 pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
2863 break;
2864 case OPC2_32_BIT_OR_NOR_T:
2865 if (TCG_TARGET_HAS_orc_i32) {
2866 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2867 pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
2868 } else {
2869 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2870 pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl);
2872 break;
2873 case OPC2_32_BIT_OR_OR_T:
2874 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2875 pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl);
2876 break;
2880 static void decode_bit_sh_logic1(CPUTriCoreState *env, DisasContext *ctx)
2882 uint32_t op2;
2883 int r1, r2, r3;
2884 int pos1, pos2;
2885 TCGv temp;
2887 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2888 r1 = MASK_OP_BIT_S1(ctx->opcode);
2889 r2 = MASK_OP_BIT_S2(ctx->opcode);
2890 r3 = MASK_OP_BIT_D(ctx->opcode);
2891 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2892 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2894 temp = tcg_temp_new();
2896 switch (op2) {
2897 case OPC2_32_BIT_SH_AND_T:
2898 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2899 pos1, pos2, &tcg_gen_and_tl);
2900 break;
2901 case OPC2_32_BIT_SH_ANDN_T:
2902 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2903 pos1, pos2, &tcg_gen_andc_tl);
2904 break;
2905 case OPC2_32_BIT_SH_NOR_T:
2906 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2907 pos1, pos2, &tcg_gen_nor_tl);
2908 break;
2909 case OPC2_32_BIT_SH_OR_T:
2910 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2911 pos1, pos2, &tcg_gen_or_tl);
2912 break;
2914 tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
2915 tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
2916 tcg_temp_free(temp);
2919 static void decode_bit_sh_logic2(CPUTriCoreState *env, DisasContext *ctx)
2921 uint32_t op2;
2922 int r1, r2, r3;
2923 int pos1, pos2;
2924 TCGv temp;
2926 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2927 r1 = MASK_OP_BIT_S1(ctx->opcode);
2928 r2 = MASK_OP_BIT_S2(ctx->opcode);
2929 r3 = MASK_OP_BIT_D(ctx->opcode);
2930 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2931 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2933 temp = tcg_temp_new();
2935 switch (op2) {
2936 case OPC2_32_BIT_SH_NAND_T:
2937         gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2938 pos1, pos2, &tcg_gen_nand_tl);
2939 break;
2940 case OPC2_32_BIT_SH_ORN_T:
2941 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2942 pos1, pos2, &tcg_gen_orc_tl);
2943 break;
2944 case OPC2_32_BIT_SH_XNOR_T:
2945 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2946 pos1, pos2, &tcg_gen_eqv_tl);
2947 break;
2948 case OPC2_32_BIT_SH_XOR_T:
2949 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2950 pos1, pos2, &tcg_gen_xor_tl);
2951 break;
2953 tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
2954 tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
2955 tcg_temp_free(temp);
2958 /* BO-format */
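/* BO format: base register A[b] with a 10-bit offset, available with
   short-offset, post-increment and pre-increment addressing variants. */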
2961 static void decode_bo_addrmode_post_pre_base(CPUTriCoreState *env,
2962 DisasContext *ctx)
2964 uint32_t op2;
2965 uint32_t off10;
2966 int32_t r1, r2;
2967 TCGv temp;
2969 r1 = MASK_OP_BO_S1D(ctx->opcode);
2970 r2 = MASK_OP_BO_S2(ctx->opcode);
2971 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
2972 op2 = MASK_OP_BO_OP2(ctx->opcode);
2974 switch (op2) {
2975 case OPC2_32_BO_CACHEA_WI_SHORTOFF:
2976 case OPC2_32_BO_CACHEA_W_SHORTOFF:
2977 case OPC2_32_BO_CACHEA_I_SHORTOFF:
2978 /* instruction to access the cache */
2979 break;
2980 case OPC2_32_BO_CACHEA_WI_POSTINC:
2981 case OPC2_32_BO_CACHEA_W_POSTINC:
2982 case OPC2_32_BO_CACHEA_I_POSTINC:
2983 /* instruction to access the cache, but we still need to handle
2984 the addressing mode */
2985         tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2986 break;
2987 case OPC2_32_BO_CACHEA_WI_PREINC:
2988 case OPC2_32_BO_CACHEA_W_PREINC:
2989 case OPC2_32_BO_CACHEA_I_PREINC:
2990 /* instruction to access the cache, but we still need to handle
2991 the addressing mode */
2992         tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2993 break;
2994 case OPC2_32_BO_CACHEI_WI_SHORTOFF:
2995 case OPC2_32_BO_CACHEI_W_SHORTOFF:
2996 /* TODO: Raise illegal opcode trap,
2997 if !tricore_feature(TRICORE_FEATURE_131) */
2998 break;
2999 case OPC2_32_BO_CACHEI_W_POSTINC:
3000 case OPC2_32_BO_CACHEI_WI_POSTINC:
3001 if (tricore_feature(env, TRICORE_FEATURE_131)) {
3002             tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3003 } /* TODO: else raise illegal opcode trap */
3004 break;
3005 case OPC2_32_BO_CACHEI_W_PREINC:
3006 case OPC2_32_BO_CACHEI_WI_PREINC:
3007 if (tricore_feature(env, TRICORE_FEATURE_131)) {
3008             tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3009 } /* TODO: else raise illegal opcode trap */
3010 break;
3011 case OPC2_32_BO_ST_A_SHORTOFF:
3012 gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
3013 break;
3014 case OPC2_32_BO_ST_A_POSTINC:
3015 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
3016 MO_LESL);
3017 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3018 break;
3019 case OPC2_32_BO_ST_A_PREINC:
3020 gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
3021 break;
3022 case OPC2_32_BO_ST_B_SHORTOFF:
3023 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
3024 break;
3025 case OPC2_32_BO_ST_B_POSTINC:
3026 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
3027 MO_UB);
3028 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3029 break;
3030 case OPC2_32_BO_ST_B_PREINC:
3031 gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
3032 break;
3033 case OPC2_32_BO_ST_D_SHORTOFF:
3034 gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
3035 off10, ctx);
3036 break;
3037 case OPC2_32_BO_ST_D_POSTINC:
3038 gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
3039 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3040 break;
3041 case OPC2_32_BO_ST_D_PREINC:
3042 temp = tcg_temp_new();
3043 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3044 gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
3045 tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
3046 tcg_temp_free(temp);
3047 break;
3048 case OPC2_32_BO_ST_DA_SHORTOFF:
3049 gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
3050 off10, ctx);
3051 break;
3052 case OPC2_32_BO_ST_DA_POSTINC:
3053 gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
3054 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3055 break;
3056 case OPC2_32_BO_ST_DA_PREINC:
3057 temp = tcg_temp_new();
3058 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3059 gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
3060 tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
3061 tcg_temp_free(temp);
3062 break;
3063 case OPC2_32_BO_ST_H_SHORTOFF:
3064 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
3065 break;
3066 case OPC2_32_BO_ST_H_POSTINC:
3067 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
3068 MO_LEUW);
3069 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3070 break;
3071 case OPC2_32_BO_ST_H_PREINC:
3072 gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
3073 break;
3074 case OPC2_32_BO_ST_Q_SHORTOFF:
3075 temp = tcg_temp_new();
3076 tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
3077 gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
3078 tcg_temp_free(temp);
3079 break;
3080 case OPC2_32_BO_ST_Q_POSTINC:
3081 temp = tcg_temp_new();
3082 tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
3083 tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx,
3084 MO_LEUW);
3085 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3086 tcg_temp_free(temp);
3087 break;
3088 case OPC2_32_BO_ST_Q_PREINC:
3089 temp = tcg_temp_new();
3090 tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
3091 gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
3092 tcg_temp_free(temp);
3093 break;
3094 case OPC2_32_BO_ST_W_SHORTOFF:
3095 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
3096 break;
3097 case OPC2_32_BO_ST_W_POSTINC:
3098 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
3099 MO_LEUL);
3100 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3101 break;
3102 case OPC2_32_BO_ST_W_PREINC:
3103 gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
3104 break;
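/* Bit-reverse and circular addressing use the register pair A[b]/A[b+1]:
   the low half of A[b+1] holds the current index, which the br/circ update
   helpers advance after each access. */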
3108 static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState *env,
3109 DisasContext *ctx)
3111 uint32_t op2;
3112 uint32_t off10;
3113 int32_t r1, r2;
3114 TCGv temp, temp2, temp3;
3116 r1 = MASK_OP_BO_S1D(ctx->opcode);
3117 r2 = MASK_OP_BO_S2(ctx->opcode);
3118 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
3119 op2 = MASK_OP_BO_OP2(ctx->opcode);
3121 temp = tcg_temp_new();
3122 temp2 = tcg_temp_new();
3123 temp3 = tcg_const_i32(off10);
3125 tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
3126 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
3128 switch (op2) {
3129 case OPC2_32_BO_CACHEA_WI_BR:
3130 case OPC2_32_BO_CACHEA_W_BR:
3131 case OPC2_32_BO_CACHEA_I_BR:
3132 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3133 break;
3134 case OPC2_32_BO_CACHEA_WI_CIRC:
3135 case OPC2_32_BO_CACHEA_W_CIRC:
3136 case OPC2_32_BO_CACHEA_I_CIRC:
3137 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3138 break;
3139 case OPC2_32_BO_ST_A_BR:
3140 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
3141 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3142 break;
3143 case OPC2_32_BO_ST_A_CIRC:
3144 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
3145 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3146 break;
3147 case OPC2_32_BO_ST_B_BR:
3148 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
3149 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3150 break;
3151 case OPC2_32_BO_ST_B_CIRC:
3152 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
3153 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3154 break;
3155 case OPC2_32_BO_ST_D_BR:
3156 gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
3157 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3158 break;
3159 case OPC2_32_BO_ST_D_CIRC:
3160 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
3161 tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
3162 tcg_gen_addi_tl(temp, temp, 4);
3163 tcg_gen_rem_tl(temp, temp, temp2);
3164 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
3165 tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
3166 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3167 break;
3168 case OPC2_32_BO_ST_DA_BR:
3169 gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
3170 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3171 break;
3172 case OPC2_32_BO_ST_DA_CIRC:
3173 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
3174 tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
3175 tcg_gen_addi_tl(temp, temp, 4);
3176 tcg_gen_rem_tl(temp, temp, temp2);
3177 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
3178 tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
3179 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3180 break;
3181 case OPC2_32_BO_ST_H_BR:
3182 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
3183 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3184 break;
3185 case OPC2_32_BO_ST_H_CIRC:
3186 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
3187 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3188 break;
3189 case OPC2_32_BO_ST_Q_BR:
3190 tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
3191 tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
3192 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3193 break;
3194 case OPC2_32_BO_ST_Q_CIRC:
3195 tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
3196 tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
3197 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3198 break;
3199 case OPC2_32_BO_ST_W_BR:
3200 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
3201 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3202 break;
3203 case OPC2_32_BO_ST_W_CIRC:
3204 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
3205 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3206 break;
3208 tcg_temp_free(temp);
3209 tcg_temp_free(temp2);
3210 tcg_temp_free(temp3);
3213 static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState *env,
3214 DisasContext *ctx)
3216 uint32_t op2;
3217 uint32_t off10;
3218 int32_t r1, r2;
3219 TCGv temp;
3221 r1 = MASK_OP_BO_S1D(ctx->opcode);
3222 r2 = MASK_OP_BO_S2(ctx->opcode);
3223 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
3224 op2 = MASK_OP_BO_OP2(ctx->opcode);
3226 switch (op2) {
3227 case OPC2_32_BO_LD_A_SHORTOFF:
3228 gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
3229 break;
3230 case OPC2_32_BO_LD_A_POSTINC:
3231 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
3232 MO_LEUL);
3233 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3234 break;
3235 case OPC2_32_BO_LD_A_PREINC:
3236 gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
3237 break;
3238 case OPC2_32_BO_LD_B_SHORTOFF:
3239 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
3240 break;
3241 case OPC2_32_BO_LD_B_POSTINC:
3242 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
3243 MO_SB);
3244 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3245 break;
3246 case OPC2_32_BO_LD_B_PREINC:
3247 gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
3248 break;
3249 case OPC2_32_BO_LD_BU_SHORTOFF:
3250 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
3251 break;
3252 case OPC2_32_BO_LD_BU_POSTINC:
3253 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
3254 MO_UB);
3255 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3256 break;
3257 case OPC2_32_BO_LD_BU_PREINC:
3258         gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
3259 break;
3260 case OPC2_32_BO_LD_D_SHORTOFF:
3261 gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
3262 off10, ctx);
3263 break;
3264 case OPC2_32_BO_LD_D_POSTINC:
3265 gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
3266 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3267 break;
3268 case OPC2_32_BO_LD_D_PREINC:
3269 temp = tcg_temp_new();
3270 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3271 gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
3272 tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
3273 tcg_temp_free(temp);
3274 break;
3275 case OPC2_32_BO_LD_DA_SHORTOFF:
3276 gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
3277 off10, ctx);
3278 break;
3279 case OPC2_32_BO_LD_DA_POSTINC:
3280 gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
3281 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3282 break;
3283 case OPC2_32_BO_LD_DA_PREINC:
3284 temp = tcg_temp_new();
3285 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3286 gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
3287 tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
3288 tcg_temp_free(temp);
3289 break;
3290 case OPC2_32_BO_LD_H_SHORTOFF:
3291 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
3292 break;
3293 case OPC2_32_BO_LD_H_POSTINC:
3294 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
3295 MO_LESW);
3296 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3297 break;
3298 case OPC2_32_BO_LD_H_PREINC:
3299 gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
3300 break;
3301 case OPC2_32_BO_LD_HU_SHORTOFF:
3302 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
3303 break;
3304 case OPC2_32_BO_LD_HU_POSTINC:
3305 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
3306 MO_LEUW);
3307 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3308 break;
3309 case OPC2_32_BO_LD_HU_PREINC:
3310 gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
3311 break;
3312 case OPC2_32_BO_LD_Q_SHORTOFF:
3313 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
3314 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
3315 break;
3316 case OPC2_32_BO_LD_Q_POSTINC:
3317 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
3318 MO_LEUW);
3319 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
3320 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3321 break;
3322 case OPC2_32_BO_LD_Q_PREINC:
3323 gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
3324 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
3325 break;
3326 case OPC2_32_BO_LD_W_SHORTOFF:
3327 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
3328 break;
3329 case OPC2_32_BO_LD_W_POSTINC:
3330 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
3331 MO_LEUL);
3332 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3333 break;
3334 case OPC2_32_BO_LD_W_PREINC:
3335 gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
3336 break;
3340 static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState *env,
3341 DisasContext *ctx)
3343 uint32_t op2;
3344 uint32_t off10;
3345 int r1, r2;
3347 TCGv temp, temp2, temp3;
3349 r1 = MASK_OP_BO_S1D(ctx->opcode);
3350 r2 = MASK_OP_BO_S2(ctx->opcode);
3351 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
3352 op2 = MASK_OP_BO_OP2(ctx->opcode);
3354 temp = tcg_temp_new();
3355 temp2 = tcg_temp_new();
3356 temp3 = tcg_const_i32(off10);
3358 tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
3359 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
3362 switch (op2) {
3363 case OPC2_32_BO_LD_A_BR:
3364 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
3365 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3366 break;
3367 case OPC2_32_BO_LD_A_CIRC:
3368 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
3369 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3370 break;
3371 case OPC2_32_BO_LD_B_BR:
3372 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
3373 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3374 break;
3375 case OPC2_32_BO_LD_B_CIRC:
3376 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
3377 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3378 break;
3379 case OPC2_32_BO_LD_BU_BR:
3380 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
3381 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3382 break;
3383 case OPC2_32_BO_LD_BU_CIRC:
3384 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
3385 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3386 break;
3387 case OPC2_32_BO_LD_D_BR:
3388 gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
3389 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3390 break;
3391 case OPC2_32_BO_LD_D_CIRC:
3392 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
3393 tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
3394 tcg_gen_addi_tl(temp, temp, 4);
3395 tcg_gen_rem_tl(temp, temp, temp2);
3396 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
3397 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
3398 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3399 break;
3400 case OPC2_32_BO_LD_DA_BR:
3401 gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
3402 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3403 break;
3404 case OPC2_32_BO_LD_DA_CIRC:
3405 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
3406 tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
3407 tcg_gen_addi_tl(temp, temp, 4);
3408 tcg_gen_rem_tl(temp, temp, temp2);
3409 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
3410 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
3411 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3412 break;
3413 case OPC2_32_BO_LD_H_BR:
3414 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
3415 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3416 break;
3417 case OPC2_32_BO_LD_H_CIRC:
3418 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
3419 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3420 break;
3421 case OPC2_32_BO_LD_HU_BR:
3422 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
3423 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3424 break;
3425 case OPC2_32_BO_LD_HU_CIRC:
3426 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
3427 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3428 break;
3429 case OPC2_32_BO_LD_Q_BR:
3430 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
3431 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
3432 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3433 break;
3434 case OPC2_32_BO_LD_Q_CIRC:
3435 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
3436 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
3437 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3438 break;
3439 case OPC2_32_BO_LD_W_BR:
3440 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
3441 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3442 break;
3443 case OPC2_32_BO_LD_W_CIRC:
3444 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
3445 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3446 break;
3448 tcg_temp_free(temp);
3449 tcg_temp_free(temp2);
3450 tcg_temp_free(temp3);
3453 static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState *env,
3454 DisasContext *ctx)
3456 uint32_t op2;
3457 uint32_t off10;
3458 int r1, r2;
3460 TCGv temp, temp2;
3462 r1 = MASK_OP_BO_S1D(ctx->opcode);
3463 r2 = MASK_OP_BO_S2(ctx->opcode);
3464 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
3465 op2 = MASK_OP_BO_OP2(ctx->opcode);
3468 temp = tcg_temp_new();
3469 temp2 = tcg_temp_new();
3471 switch (op2) {
3472 case OPC2_32_BO_LDLCX_SHORTOFF:
3473 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3474 gen_helper_ldlcx(cpu_env, temp);
3475 break;
3476 case OPC2_32_BO_LDMST_SHORTOFF:
3477 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3478 gen_ldmst(ctx, r1, temp);
3479 break;
3480 case OPC2_32_BO_LDMST_POSTINC:
3481 gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
3482 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3483 break;
3484 case OPC2_32_BO_LDMST_PREINC:
3485 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3486 gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
3487 break;
3488 case OPC2_32_BO_LDUCX_SHORTOFF:
3489 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3490 gen_helper_lducx(cpu_env, temp);
3491 break;
3492 case OPC2_32_BO_LEA_SHORTOFF:
3493 tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
3494 break;
3495 case OPC2_32_BO_STLCX_SHORTOFF:
3496 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3497 gen_helper_stlcx(cpu_env, temp);
3498 break;
3499 case OPC2_32_BO_STUCX_SHORTOFF:
3500 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3501 gen_helper_stucx(cpu_env, temp);
3502 break;
3503 case OPC2_32_BO_SWAP_W_SHORTOFF:
3504 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3505 gen_swap(ctx, r1, temp);
3506 break;
3507 case OPC2_32_BO_SWAP_W_POSTINC:
3508 gen_swap(ctx, r1, cpu_gpr_a[r2]);
3509 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3510 break;
3511 case OPC2_32_BO_SWAP_W_PREINC:
3512 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3513 gen_swap(ctx, r1, cpu_gpr_a[r2]);
3514 break;
3516 tcg_temp_free(temp);
3517 tcg_temp_free(temp2);
3520 static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState *env,
3521 DisasContext *ctx)
3523 uint32_t op2;
3524 uint32_t off10;
3525 int r1, r2;
3527 TCGv temp, temp2, temp3;
3529 r1 = MASK_OP_BO_S1D(ctx->opcode);
3530 r2 = MASK_OP_BO_S2(ctx->opcode);
3531 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
3532 op2 = MASK_OP_BO_OP2(ctx->opcode);
3534 temp = tcg_temp_new();
3535 temp2 = tcg_temp_new();
3536 temp3 = tcg_const_i32(off10);
3538 tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
3539 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
3541 switch (op2) {
3542 case OPC2_32_BO_LDMST_BR:
3543 gen_ldmst(ctx, r1, temp2);
3544 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3545 break;
3546 case OPC2_32_BO_LDMST_CIRC:
3547 gen_ldmst(ctx, r1, temp2);
3548 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3549 break;
3550 case OPC2_32_BO_SWAP_W_BR:
3551 gen_swap(ctx, r1, temp2);
3552 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3553 break;
3554 case OPC2_32_BO_SWAP_W_CIRC:
3555 gen_swap(ctx, r1, temp2);
3556 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3557 break;
3559 tcg_temp_free(temp);
3560 tcg_temp_free(temp2);
3561 tcg_temp_free(temp3);
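/* BOL format: base register A[b] plus a 16-bit sign-extended offset; several
   variants are guarded by the TriCore 1.6 feature check. */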
3564 static void decode_bol_opc(CPUTriCoreState *env, DisasContext *ctx, int32_t op1)
3566 int r1, r2;
3567 int32_t address;
3568 TCGv temp;
3570 r1 = MASK_OP_BOL_S1D(ctx->opcode);
3571 r2 = MASK_OP_BOL_S2(ctx->opcode);
3572 address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode);
3574 switch (op1) {
3575 case OPC1_32_BOL_LD_A_LONGOFF:
3576 temp = tcg_temp_new();
3577 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
3578 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
3579 tcg_temp_free(temp);
3580 break;
3581 case OPC1_32_BOL_LD_W_LONGOFF:
3582 temp = tcg_temp_new();
3583 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
3584 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
3585 tcg_temp_free(temp);
3586 break;
3587 case OPC1_32_BOL_LEA_LONGOFF:
3588 tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
3589 break;
3590 case OPC1_32_BOL_ST_A_LONGOFF:
3591 if (tricore_feature(env, TRICORE_FEATURE_16)) {
3592 gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], address, MO_LEUL);
3593 } else {
3594 /* raise illegal opcode trap */
3596 break;
3597 case OPC1_32_BOL_ST_W_LONGOFF:
3598 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUL);
3599 break;
3600 case OPC1_32_BOL_LD_B_LONGOFF:
3601 if (tricore_feature(env, TRICORE_FEATURE_16)) {
3602 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
3603 } else {
3604 /* raise illegal opcode trap */
3606 break;
3607 case OPC1_32_BOL_LD_BU_LONGOFF:
3608 if (tricore_feature(env, TRICORE_FEATURE_16)) {
3609 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_UB);
3610 } else {
3611 /* raise illegal opcode trap */
3613 break;
3614 case OPC1_32_BOL_LD_H_LONGOFF:
3615 if (tricore_feature(env, TRICORE_FEATURE_16)) {
3616 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
3617 } else {
3618 /* raise illegal opcode trap */
3620 break;
3621 case OPC1_32_BOL_LD_HU_LONGOFF:
3622 if (tricore_feature(env, TRICORE_FEATURE_16)) {
3623 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUW);
3624 } else {
3625 /* raise illegal opcode trap */
3627 break;
3628 case OPC1_32_BOL_ST_B_LONGOFF:
3629 if (tricore_feature(env, TRICORE_FEATURE_16)) {
3630 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
3631 } else {
3632 /* raise illegal opcode trap */
3634 break;
3635 case OPC1_32_BOL_ST_H_LONGOFF:
3636 if (tricore_feature(env, TRICORE_FEATURE_16)) {
3637             gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
3638 } else {
3639 /* raise illegal opcode trap */
3641 break;
3645 /* RC format */
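/* RC format: one source register and a 9-bit constant, sign- or zero-extended
   depending on the operation. */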
3646 static void decode_rc_logical_shift(CPUTriCoreState *env, DisasContext *ctx)
3648 uint32_t op2;
3649 int r1, r2;
3650 int32_t const9;
3651 TCGv temp;
3653 r2 = MASK_OP_RC_D(ctx->opcode);
3654 r1 = MASK_OP_RC_S1(ctx->opcode);
3655 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3656 op2 = MASK_OP_RC_OP2(ctx->opcode);
3658 temp = tcg_temp_new();
3660 switch (op2) {
3661 case OPC2_32_RC_AND:
3662 tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3663 break;
3664 case OPC2_32_RC_ANDN:
3665 tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
3666 break;
3667 case OPC2_32_RC_NAND:
3668 tcg_gen_movi_tl(temp, const9);
3669 tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
3670 break;
3671 case OPC2_32_RC_NOR:
3672 tcg_gen_movi_tl(temp, const9);
3673 tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
3674 break;
3675 case OPC2_32_RC_OR:
3676 tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3677 break;
3678 case OPC2_32_RC_ORN:
3679 tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
3680 break;
3681 case OPC2_32_RC_SH:
3682 const9 = sextract32(const9, 0, 6);
3683 gen_shi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3684 break;
3685 case OPC2_32_RC_SH_H:
3686 const9 = sextract32(const9, 0, 5);
3687 gen_sh_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3688 break;
3689 case OPC2_32_RC_SHA:
3690 const9 = sextract32(const9, 0, 6);
3691 gen_shaci(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3692 break;
3693 case OPC2_32_RC_SHA_H:
3694 const9 = sextract32(const9, 0, 5);
3695 gen_sha_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3696 break;
3697 case OPC2_32_RC_SHAS:
3698 gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3699 break;
3700 case OPC2_32_RC_XNOR:
3701 tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3702 tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]);
3703 break;
3704 case OPC2_32_RC_XOR:
3705 tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3706 break;
3708 tcg_temp_free(temp);
3711 static void decode_rc_accumulator(CPUTriCoreState *env, DisasContext *ctx)
3713 uint32_t op2;
3714 int r1, r2;
3715 int16_t const9;
3717 TCGv temp;
3719 r2 = MASK_OP_RC_D(ctx->opcode);
3720 r1 = MASK_OP_RC_S1(ctx->opcode);
3721 const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);
3723 op2 = MASK_OP_RC_OP2(ctx->opcode);
3725 temp = tcg_temp_new();
3727 switch (op2) {
3728 case OPC2_32_RC_ABSDIF:
3729 gen_absdifi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3730 break;
3731 case OPC2_32_RC_ABSDIFS:
3732 gen_absdifsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3733 break;
3734 case OPC2_32_RC_ADD:
3735 gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3736 break;
3737 case OPC2_32_RC_ADDC:
3738 gen_addci_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3739 break;
3740 case OPC2_32_RC_ADDS:
3741 gen_addsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3742 break;
3743 case OPC2_32_RC_ADDS_U:
3744 gen_addsui(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3745 break;
3746 case OPC2_32_RC_ADDX:
3747 gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3748 break;
3749 case OPC2_32_RC_AND_EQ:
3750 gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
3751 const9, &tcg_gen_and_tl);
3752 break;
3753 case OPC2_32_RC_AND_GE:
3754 gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3755 const9, &tcg_gen_and_tl);
3756 break;
3757 case OPC2_32_RC_AND_GE_U:
3758 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3759 gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3760 const9, &tcg_gen_and_tl);
3761 break;
3762 case OPC2_32_RC_AND_LT:
3763 gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
3764 const9, &tcg_gen_and_tl);
3765 break;
3766 case OPC2_32_RC_AND_LT_U:
3767 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3768 gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3769 const9, &tcg_gen_and_tl);
3770 break;
3771 case OPC2_32_RC_AND_NE:
3772 gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3773 const9, &tcg_gen_and_tl);
3774 break;
3775 case OPC2_32_RC_EQ:
3776 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3777 break;
3778 case OPC2_32_RC_EQANY_B:
3779 gen_eqany_bi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3780 break;
3781 case OPC2_32_RC_EQANY_H:
3782 gen_eqany_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3783 break;
3784 case OPC2_32_RC_GE:
3785 tcg_gen_setcondi_tl(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3786 break;
3787 case OPC2_32_RC_GE_U:
3788 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3789 tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3790 break;
3791 case OPC2_32_RC_LT:
3792 tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3793 break;
3794 case OPC2_32_RC_LT_U:
3795 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3796 tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3797 break;
3798 case OPC2_32_RC_MAX:
3799 tcg_gen_movi_tl(temp, const9);
3800 tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
3801 cpu_gpr_d[r1], temp);
3802 break;
3803 case OPC2_32_RC_MAX_U:
3804 tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
3805 tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
3806 cpu_gpr_d[r1], temp);
3807 break;
3808 case OPC2_32_RC_MIN:
3809 tcg_gen_movi_tl(temp, const9);
3810 tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
3811 cpu_gpr_d[r1], temp);
3812 break;
3813 case OPC2_32_RC_MIN_U:
3814 tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
3815 tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
3816 cpu_gpr_d[r1], temp);
3817 break;
3818 case OPC2_32_RC_NE:
3819 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3820 break;
3821 case OPC2_32_RC_OR_EQ:
3822 gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
3823 const9, &tcg_gen_or_tl);
3824 break;
3825 case OPC2_32_RC_OR_GE:
3826 gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3827 const9, &tcg_gen_or_tl);
3828 break;
3829 case OPC2_32_RC_OR_GE_U:
3830 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3831 gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3832 const9, &tcg_gen_or_tl);
3833 break;
3834 case OPC2_32_RC_OR_LT:
3835 gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
3836 const9, &tcg_gen_or_tl);
3837 break;
3838 case OPC2_32_RC_OR_LT_U:
3839 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3840 gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3841 const9, &tcg_gen_or_tl);
3842 break;
3843 case OPC2_32_RC_OR_NE:
3844 gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3845 const9, &tcg_gen_or_tl);
3846 break;
3847 case OPC2_32_RC_RSUB:
3848 tcg_gen_movi_tl(temp, const9);
3849 gen_sub_d(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
3850 break;
3851 case OPC2_32_RC_RSUBS:
3852 tcg_gen_movi_tl(temp, const9);
3853 gen_subs(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
3854 break;
3855 case OPC2_32_RC_RSUBS_U:
3856 tcg_gen_movi_tl(temp, const9);
3857 gen_subsu(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
3858 break;
3859 case OPC2_32_RC_SH_EQ:
3860 gen_sh_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3861 break;
3862 case OPC2_32_RC_SH_GE:
3863 gen_sh_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3864 break;
3865 case OPC2_32_RC_SH_GE_U:
3866 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3867 gen_sh_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3868 break;
3869 case OPC2_32_RC_SH_LT:
3870 gen_sh_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3871 break;
3872 case OPC2_32_RC_SH_LT_U:
3873 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3874 gen_sh_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3875 break;
3876 case OPC2_32_RC_SH_NE:
3877 gen_sh_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3878 break;
3879 case OPC2_32_RC_XOR_EQ:
3880 gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
3881 const9, &tcg_gen_xor_tl);
3882 break;
3883 case OPC2_32_RC_XOR_GE:
3884 gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3885 const9, &tcg_gen_xor_tl);
3886 break;
3887 case OPC2_32_RC_XOR_GE_U:
3888 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3889 gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3890 const9, &tcg_gen_xor_tl);
3891 break;
3892 case OPC2_32_RC_XOR_LT:
3893 gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
3894 const9, &tcg_gen_xor_tl);
3895 break;
3896 case OPC2_32_RC_XOR_LT_U:
3897 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3898 gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3899 const9, &tcg_gen_xor_tl);
3900 break;
3901 case OPC2_32_RC_XOR_NE:
3902 gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3903 const9, &tcg_gen_xor_tl);
3904 break;
3906 tcg_temp_free(temp);
3909 static void decode_rc_serviceroutine(CPUTriCoreState *env, DisasContext *ctx)
3911 uint32_t op2;
3912 uint32_t const9;
3914 op2 = MASK_OP_RC_OP2(ctx->opcode);
3915 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3917 switch (op2) {
3918 case OPC2_32_RC_BISR:
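        /* BISR: the helper saves the lower context and updates ICR */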
3919 gen_helper_1arg(bisr, const9);
3920 break;
3921 case OPC2_32_RC_SYSCALL:
3922 /* TODO: Add exception generation */
3923 break;
3927 static void decode_rc_mul(CPUTriCoreState *env, DisasContext *ctx)
3929 uint32_t op2;
3930 int r1, r2;
3931 int16_t const9;
3933 r2 = MASK_OP_RC_D(ctx->opcode);
3934 r1 = MASK_OP_RC_S1(ctx->opcode);
3935 const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);
3937 op2 = MASK_OP_RC_OP2(ctx->opcode);
3939 switch (op2) {
3940 case OPC2_32_RC_MUL_32:
3941 gen_muli_i32s(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3942 break;
3943 case OPC2_32_RC_MUL_64:
3944 gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
3945 break;
3946 case OPC2_32_RC_MULS_32:
3947 gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3948 break;
3949 case OPC2_32_RC_MUL_U_64:
3950 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3951 gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
3952 break;
3953 case OPC2_32_RC_MULS_U_32:
3954 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3955 gen_mulsui_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3956 break;
3960 /* RCPW format */
3961 static void decode_rcpw_insert(CPUTriCoreState *env, DisasContext *ctx)
3963 uint32_t op2;
3964 int r1, r2;
3965 int32_t pos, width, const4;
3967 TCGv temp;
3969 op2 = MASK_OP_RCPW_OP2(ctx->opcode);
3970 r1 = MASK_OP_RCPW_S1(ctx->opcode);
3971 r2 = MASK_OP_RCPW_D(ctx->opcode);
3972 const4 = MASK_OP_RCPW_CONST4(ctx->opcode);
3973 width = MASK_OP_RCPW_WIDTH(ctx->opcode);
3974 pos = MASK_OP_RCPW_POS(ctx->opcode);
3976 switch (op2) {
3977 case OPC2_32_RCPW_IMASK:
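        /* IMASK: E[c] = { ((1 << width) - 1) << pos, const4 << pos } */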
3978 /* if pos + width > 31 undefined result */
3979 if (pos + width <= 31) {
3980 tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos);
3981 tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos));
3983 break;
3984 case OPC2_32_RCPW_INSERT:
3985 /* if pos + width > 32 undefined result */
3986 if (pos + width <= 32) {
3987 temp = tcg_const_i32(const4);
3988 tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
3989 tcg_temp_free(temp);
3991 break;
3995 /* RCRW format */
3997 static void decode_rcrw_insert(CPUTriCoreState *env, DisasContext *ctx)
3999 uint32_t op2;
4000 int r1, r3, r4;
4001 int32_t width, const4;
4003 TCGv temp, temp2, temp3;
4005 op2 = MASK_OP_RCRW_OP2(ctx->opcode);
4006 r1 = MASK_OP_RCRW_S1(ctx->opcode);
4007 r3 = MASK_OP_RCRW_S3(ctx->opcode);
4008 r4 = MASK_OP_RCRW_D(ctx->opcode);
4009 width = MASK_OP_RCRW_WIDTH(ctx->opcode);
4010 const4 = MASK_OP_RCRW_CONST4(ctx->opcode);
4012 temp = tcg_temp_new();
4013 temp2 = tcg_temp_new();
4015 switch (op2) {
4016 case OPC2_32_RCRW_IMASK:
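        /* like RCPW IMASK, but the bit position comes from a register
           (low 5 bits) instead of an immediate */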
4017 tcg_gen_andi_tl(temp, cpu_gpr_d[r4], 0x1f);
4018 tcg_gen_movi_tl(temp2, (1 << width) - 1);
4019 tcg_gen_shl_tl(cpu_gpr_d[r3 + 1], temp2, temp);
4020 tcg_gen_movi_tl(temp2, const4);
4021 tcg_gen_shl_tl(cpu_gpr_d[r3], temp2, temp);
4022 break;
4023 case OPC2_32_RCRW_INSERT:
4024 temp3 = tcg_temp_new();
4026 tcg_gen_movi_tl(temp, width);
4027 tcg_gen_movi_tl(temp2, const4);
4028 tcg_gen_andi_tl(temp3, cpu_gpr_d[r4], 0x1f);
4029 gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp2, temp, temp3);
4031 tcg_temp_free(temp3);
4032 break;
4034 tcg_temp_free(temp);
4035 tcg_temp_free(temp2);
4038 /* RCR format */
4040 static void decode_rcr_cond_select(CPUTriCoreState *env, DisasContext *ctx)
4042 uint32_t op2;
4043 int r1, r3, r4;
4044 int32_t const9;
4046 TCGv temp, temp2;
4048 op2 = MASK_OP_RCR_OP2(ctx->opcode);
4049 r1 = MASK_OP_RCR_S1(ctx->opcode);
4050 const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
4051 r3 = MASK_OP_RCR_S3(ctx->opcode);
4052 r4 = MASK_OP_RCR_D(ctx->opcode);
4054 switch (op2) {
4055 case OPC2_32_RCR_CADD:
4056 gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
4057 cpu_gpr_d[r4]);
4058 break;
4059 case OPC2_32_RCR_CADDN:
4060 gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
4061 cpu_gpr_d[r4]);
4062 break;
4063 case OPC2_32_RCR_SEL:
4064 temp = tcg_const_i32(0);
4065 temp2 = tcg_const_i32(const9);
4066 tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
4067 cpu_gpr_d[r1], temp2);
4068 tcg_temp_free(temp);
4069 tcg_temp_free(temp2);
4070 break;
4071 case OPC2_32_RCR_SELN:
4072 temp = tcg_const_i32(0);
4073 temp2 = tcg_const_i32(const9);
4074 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
4075 cpu_gpr_d[r1], temp2);
4076 tcg_temp_free(temp);
4077 tcg_temp_free(temp2);
4078 break;
4082 static void decode_rcr_madd(CPUTriCoreState *env, DisasContext *ctx)
4084 uint32_t op2;
4085 int r1, r3, r4;
4086 int32_t const9;
4089 op2 = MASK_OP_RCR_OP2(ctx->opcode);
4090 r1 = MASK_OP_RCR_S1(ctx->opcode);
4091 const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
4092 r3 = MASK_OP_RCR_S3(ctx->opcode);
4093 r4 = MASK_OP_RCR_D(ctx->opcode);
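    /* MADD(S): D[c] (or E[c]) = D[d] (or E[d]) + D[a] * const9;
       the S forms saturate the result */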
4095 switch (op2) {
4096 case OPC2_32_RCR_MADD_32:
4097 gen_maddi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
4098 break;
4099 case OPC2_32_RCR_MADD_64:
4100 gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
4101 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
4102 break;
4103 case OPC2_32_RCR_MADDS_32:
4104 gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
4105 break;
4106 case OPC2_32_RCR_MADDS_64:
4107 gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
4108 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
4109 break;
4110 case OPC2_32_RCR_MADD_U_64:
4111 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
4112 gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
4113 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
4114 break;
4115 case OPC2_32_RCR_MADDS_U_32:
4116 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
4117 gen_maddsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
4118 break;
4119 case OPC2_32_RCR_MADDS_U_64:
4120 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
4121 gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
4122 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
4123 break;
4127 static void decode_rcr_msub(CPUTriCoreState *env, DisasContext *ctx)
4129 uint32_t op2;
4130 int r1, r3, r4;
4131 int32_t const9;
4134 op2 = MASK_OP_RCR_OP2(ctx->opcode);
4135 r1 = MASK_OP_RCR_S1(ctx->opcode);
4136 const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
4137 r3 = MASK_OP_RCR_S3(ctx->opcode);
4138 r4 = MASK_OP_RCR_D(ctx->opcode);
4140 switch (op2) {
4141 case OPC2_32_RCR_MSUB_32:
4142 gen_msubi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
4143 break;
4144 case OPC2_32_RCR_MSUB_64:
4145 gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
4146 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
4147 break;
4148 case OPC2_32_RCR_MSUBS_32:
4149 gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
4150 break;
4151 case OPC2_32_RCR_MSUBS_64:
4152 gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
4153 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
4154 break;
4155 case OPC2_32_RCR_MSUB_U_64:
4156 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
4157 gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
4158 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
4159 break;
4160 case OPC2_32_RCR_MSUBS_U_32:
4161 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
4162 gen_msubsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
4163 break;
4164 case OPC2_32_RCR_MSUBS_U_64:
4165 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
4166 gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
4167 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
4168 break;
4172 /* RLC format */
4174 static void decode_rlc_opc(CPUTriCoreState *env, DisasContext *ctx,
4175 uint32_t op1)
4177 int32_t const16;
4178 int r1, r2;
4180 const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode);
4181 r1 = MASK_OP_RLC_S1(ctx->opcode);
4182 r2 = MASK_OP_RLC_D(ctx->opcode);
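    /* const16 is sign-extended by default; MFCR, MOV.U and MTCR re-read it
       zero-extended below */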
4184 switch (op1) {
4185 case OPC1_32_RLC_ADDI:
4186 gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const16);
4187 break;
4188 case OPC1_32_RLC_ADDIH:
4189 gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16);
4190 break;
4191 case OPC1_32_RLC_ADDIH_A:
4192 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
4193 break;
4194 case OPC1_32_RLC_MFCR:
4195 const16 = MASK_OP_RLC_CONST16(ctx->opcode);
4196 gen_mfcr(env, cpu_gpr_d[r2], const16);
4197 break;
4198 case OPC1_32_RLC_MOV:
4199 tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
4200 break;
4201 case OPC1_32_RLC_MOV_64:
4202 if (tricore_feature(env, TRICORE_FEATURE_16)) {
4203 if ((r2 & 0x1) != 0) {
4204 /* TODO: raise OPD trap */
4206 tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
4207 tcg_gen_movi_tl(cpu_gpr_d[r2+1], const16 >> 15);
4208 } else {
4209 /* TODO: raise illegal opcode trap */
4211 break;
4212 case OPC1_32_RLC_MOV_U:
4213 const16 = MASK_OP_RLC_CONST16(ctx->opcode);
4214 tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
4215 break;
4216 case OPC1_32_RLC_MOV_H:
4217 tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16);
4218 break;
4219 case OPC1_32_RLC_MOVH_A:
4220 tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16);
4221 break;
4222 case OPC1_32_RLC_MTCR:
4223 const16 = MASK_OP_RLC_CONST16(ctx->opcode);
4224 gen_mtcr(env, ctx, cpu_gpr_d[r1], const16);
4225 break;
4229 /* RR format */
4230 static void decode_rr_accumulator(CPUTriCoreState *env, DisasContext *ctx)
4232 uint32_t op2;
4233 int r3, r2, r1;
4235 r3 = MASK_OP_RR_D(ctx->opcode);
4236 r2 = MASK_OP_RR_S2(ctx->opcode);
4237 r1 = MASK_OP_RR_S1(ctx->opcode);
4238 op2 = MASK_OP_RR_OP2(ctx->opcode);
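    /* the AND./OR./XOR. compare ops below accumulate the compare result
       into bit 0 of D[c] */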
4240 switch (op2) {
4241 case OPC2_32_RR_ABS:
4242 gen_abs(cpu_gpr_d[r3], cpu_gpr_d[r2]);
4243 break;
4244 case OPC2_32_RR_ABS_B:
4245 gen_helper_abs_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
4246 break;
4247 case OPC2_32_RR_ABS_H:
4248 gen_helper_abs_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
4249 break;
4250 case OPC2_32_RR_ABSDIF:
4251 gen_absdif(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4252 break;
4253 case OPC2_32_RR_ABSDIF_B:
4254 gen_helper_absdif_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
4255 cpu_gpr_d[r2]);
4256 break;
4257 case OPC2_32_RR_ABSDIF_H:
4258 gen_helper_absdif_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
4259 cpu_gpr_d[r2]);
4260 break;
4261 case OPC2_32_RR_ABSDIFS:
4262 gen_helper_absdif_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
4263 cpu_gpr_d[r2]);
4264 break;
4265 case OPC2_32_RR_ABSDIFS_H:
4266 gen_helper_absdif_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
4267 cpu_gpr_d[r2]);
4268 break;
4269 case OPC2_32_RR_ABSS:
4270 gen_helper_abs_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
4271 break;
4272 case OPC2_32_RR_ABSS_H:
4273 gen_helper_abs_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
4274 break;
4275 case OPC2_32_RR_ADD:
4276 gen_add_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4277 break;
4278 case OPC2_32_RR_ADD_B:
4279 gen_helper_add_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
4280 break;
4281 case OPC2_32_RR_ADD_H:
4282 gen_helper_add_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
4283 break;
4284 case OPC2_32_RR_ADDC:
4285 gen_addc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4286 break;
4287 case OPC2_32_RR_ADDS:
4288 gen_adds(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4289 break;
4290 case OPC2_32_RR_ADDS_H:
4291 gen_helper_add_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
4292 cpu_gpr_d[r2]);
4293 break;
4294 case OPC2_32_RR_ADDS_HU:
4295 gen_helper_add_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
4296 cpu_gpr_d[r2]);
4297 break;
4298 case OPC2_32_RR_ADDS_U:
4299 gen_helper_add_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
4300 cpu_gpr_d[r2]);
4301 break;
4302 case OPC2_32_RR_ADDX:
4303 gen_add_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4304 break;
4305 case OPC2_32_RR_AND_EQ:
4306 gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
4307 cpu_gpr_d[r2], &tcg_gen_and_tl);
4308 break;
4309 case OPC2_32_RR_AND_GE:
4310 gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
4311 cpu_gpr_d[r2], &tcg_gen_and_tl);
4312 break;
4313 case OPC2_32_RR_AND_GE_U:
4314 gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4315 cpu_gpr_d[r2], &tcg_gen_and_tl);
4316 break;
4317 case OPC2_32_RR_AND_LT:
4318 gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
4319 cpu_gpr_d[r2], &tcg_gen_and_tl);
4320 break;
4321 case OPC2_32_RR_AND_LT_U:
4322 gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4323 cpu_gpr_d[r2], &tcg_gen_and_tl);
4324 break;
4325 case OPC2_32_RR_AND_NE:
4326 gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
4327 cpu_gpr_d[r2], &tcg_gen_and_tl);
4328 break;
4329 case OPC2_32_RR_EQ:
4330 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
4331 cpu_gpr_d[r2]);
4332 break;
4333 case OPC2_32_RR_EQ_B:
4334 gen_helper_eq_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4335 break;
4336 case OPC2_32_RR_EQ_H:
4337 gen_helper_eq_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4338 break;
4339 case OPC2_32_RR_EQ_W:
4340 gen_cond_w(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4341 break;
4342 case OPC2_32_RR_EQANY_B:
4343 gen_helper_eqany_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4344 break;
4345 case OPC2_32_RR_EQANY_H:
4346 gen_helper_eqany_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4347 break;
4348 case OPC2_32_RR_GE:
4349 tcg_gen_setcond_tl(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
4350 cpu_gpr_d[r2]);
4351 break;
4352 case OPC2_32_RR_GE_U:
4353 tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4354 cpu_gpr_d[r2]);
4355 break;
4356 case OPC2_32_RR_LT:
4357 tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
4358 cpu_gpr_d[r2]);
4359 break;
4360 case OPC2_32_RR_LT_U:
4361 tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4362 cpu_gpr_d[r2]);
4363 break;
4364 case OPC2_32_RR_LT_B:
4365 gen_helper_lt_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4366 break;
4367 case OPC2_32_RR_LT_BU:
4368 gen_helper_lt_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4369 break;
4370 case OPC2_32_RR_LT_H:
4371 gen_helper_lt_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4372 break;
4373 case OPC2_32_RR_LT_HU:
4374 gen_helper_lt_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4375 break;
4376 case OPC2_32_RR_LT_W:
4377 gen_cond_w(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4378 break;
4379 case OPC2_32_RR_LT_WU:
4380 gen_cond_w(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4381 break;
4382 case OPC2_32_RR_MAX:
4383 tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1],
4384 cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4385 break;
4386 case OPC2_32_RR_MAX_U:
4387 tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4388 cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4389 break;
4390 case OPC2_32_RR_MAX_B:
4391 gen_helper_max_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4392 break;
4393 case OPC2_32_RR_MAX_BU:
4394 gen_helper_max_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4395 break;
4396 case OPC2_32_RR_MAX_H:
4397 gen_helper_max_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4398 break;
4399 case OPC2_32_RR_MAX_HU:
4400 gen_helper_max_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4401 break;
4402 case OPC2_32_RR_MIN:
4403 tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
4404 cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4405 break;
4406 case OPC2_32_RR_MIN_U:
4407 tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4408 cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4409 break;
4410 case OPC2_32_RR_MIN_B:
4411 gen_helper_min_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4412 break;
4413 case OPC2_32_RR_MIN_BU:
4414 gen_helper_min_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4415 break;
4416 case OPC2_32_RR_MIN_H:
4417 gen_helper_min_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4418 break;
4419 case OPC2_32_RR_MIN_HU:
4420 gen_helper_min_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4421 break;
4422 case OPC2_32_RR_MOV:
4423 tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
4424 break;
4425 case OPC2_32_RR_NE:
4426 tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
4427 cpu_gpr_d[r2]);
4428 break;
4429 case OPC2_32_RR_OR_EQ:
4430 gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
4431 cpu_gpr_d[r2], &tcg_gen_or_tl);
4432 break;
4433 case OPC2_32_RR_OR_GE:
4434 gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
4435 cpu_gpr_d[r2], &tcg_gen_or_tl);
4436 break;
4437 case OPC2_32_RR_OR_GE_U:
4438 gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4439 cpu_gpr_d[r2], &tcg_gen_or_tl);
4440 break;
4441 case OPC2_32_RR_OR_LT:
4442 gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
4443 cpu_gpr_d[r2], &tcg_gen_or_tl);
4444 break;
4445 case OPC2_32_RR_OR_LT_U:
4446 gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4447 cpu_gpr_d[r2], &tcg_gen_or_tl);
4448 break;
4449 case OPC2_32_RR_OR_NE:
4450 gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
4451 cpu_gpr_d[r2], &tcg_gen_or_tl);
4452 break;
4453 case OPC2_32_RR_SAT_B:
4454 gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7f, -0x80);
4455 break;
4456 case OPC2_32_RR_SAT_BU:
4457 gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xff);
4458 break;
4459 case OPC2_32_RR_SAT_H:
4460 gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7fff, -0x8000);
4461 break;
4462 case OPC2_32_RR_SAT_HU:
4463 gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xffff);
4464 break;
4465 case OPC2_32_RR_SH_EQ:
4466 gen_sh_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
4467 cpu_gpr_d[r2]);
4468 break;
4469 case OPC2_32_RR_SH_GE:
4470 gen_sh_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
4471 cpu_gpr_d[r2]);
4472 break;
4473 case OPC2_32_RR_SH_GE_U:
4474 gen_sh_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4475 cpu_gpr_d[r2]);
4476 break;
4477 case OPC2_32_RR_SH_LT:
4478 gen_sh_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
4479 cpu_gpr_d[r2]);
4480 break;
4481 case OPC2_32_RR_SH_LT_U:
4482 gen_sh_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4483 cpu_gpr_d[r2]);
4484 break;
4485 case OPC2_32_RR_SH_NE:
4486 gen_sh_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
4487 cpu_gpr_d[r2]);
4488 break;
4489 case OPC2_32_RR_SUB:
4490 gen_sub_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4491 break;
4492 case OPC2_32_RR_SUB_B:
4493 gen_helper_sub_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
4494 break;
4495 case OPC2_32_RR_SUB_H:
4496 gen_helper_sub_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
4497 break;
4498 case OPC2_32_RR_SUBC:
4499 gen_subc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4500 break;
4501 case OPC2_32_RR_SUBS:
4502 gen_subs(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4503 break;
4504 case OPC2_32_RR_SUBS_U:
4505 gen_subsu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4506 break;
4507 case OPC2_32_RR_SUBS_H:
4508 gen_helper_sub_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
4509 cpu_gpr_d[r2]);
4510 break;
4511 case OPC2_32_RR_SUBS_HU:
4512 gen_helper_sub_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
4513 cpu_gpr_d[r2]);
4514 break;
4515 case OPC2_32_RR_SUBX:
4516 gen_sub_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4517 break;
4518 case OPC2_32_RR_XOR_EQ:
4519 gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
4520 cpu_gpr_d[r2], &tcg_gen_xor_tl);
4521 break;
4522 case OPC2_32_RR_XOR_GE:
4523 gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
4524 cpu_gpr_d[r2], &tcg_gen_xor_tl);
4525 break;
4526 case OPC2_32_RR_XOR_GE_U:
4527 gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4528 cpu_gpr_d[r2], &tcg_gen_xor_tl);
4529 break;
4530 case OPC2_32_RR_XOR_LT:
4531 gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
4532 cpu_gpr_d[r2], &tcg_gen_xor_tl);
4533 break;
4534 case OPC2_32_RR_XOR_LT_U:
4535 gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
4536 cpu_gpr_d[r2], &tcg_gen_xor_tl);
4537 break;
4538 case OPC2_32_RR_XOR_NE:
4539 gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
4540 cpu_gpr_d[r2], &tcg_gen_xor_tl);
4541 break;
4545 static void decode_rr_logical_shift(CPUTriCoreState *env, DisasContext *ctx)
4547 uint32_t op2;
4548 int r3, r2, r1;
4549 TCGv temp;
4551 r3 = MASK_OP_RR_D(ctx->opcode);
4552 r2 = MASK_OP_RR_S2(ctx->opcode);
4553 r1 = MASK_OP_RR_S1(ctx->opcode);
4555 temp = tcg_temp_new();
4556 op2 = MASK_OP_RR_OP2(ctx->opcode);
4558 switch (op2) {
4559 case OPC2_32_RR_AND:
4560 tcg_gen_and_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4561 break;
4562 case OPC2_32_RR_ANDN:
4563 tcg_gen_andc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4564 break;
4565 case OPC2_32_RR_CLO:
4566 gen_helper_clo(cpu_gpr_d[r3], cpu_gpr_d[r1]);
4567 break;
4568 case OPC2_32_RR_CLO_H:
4569 gen_helper_clo_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
4570 break;
4571 case OPC2_32_RR_CLS:
4572 gen_helper_cls(cpu_gpr_d[r3], cpu_gpr_d[r1]);
4573 break;
4574 case OPC2_32_RR_CLS_H:
4575 gen_helper_cls_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
4576 break;
4577 case OPC2_32_RR_CLZ:
4578 gen_helper_clz(cpu_gpr_d[r3], cpu_gpr_d[r1]);
4579 break;
4580 case OPC2_32_RR_CLZ_H:
4581 gen_helper_clz_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
4582 break;
4583 case OPC2_32_RR_NAND:
4584 tcg_gen_nand_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4585 break;
4586 case OPC2_32_RR_NOR:
4587 tcg_gen_nor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4588 break;
4589 case OPC2_32_RR_OR:
4590 tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4591 break;
4592 case OPC2_32_RR_ORN:
4593 tcg_gen_orc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4594 break;
4595 case OPC2_32_RR_SH:
4596 gen_helper_sh(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4597 break;
4598 case OPC2_32_RR_SH_H:
4599 gen_helper_sh_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4600 break;
4601 case OPC2_32_RR_SHA:
4602 gen_helper_sha(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
4603 break;
4604 case OPC2_32_RR_SHA_H:
4605 gen_helper_sha_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4606 break;
4607 case OPC2_32_RR_SHAS:
4608 gen_shas(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4609 break;
4610 case OPC2_32_RR_XNOR:
4611 tcg_gen_eqv_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4612 break;
4613 case OPC2_32_RR_XOR:
4614 tcg_gen_xor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4615 break;
4617 tcg_temp_free(temp);
4620 static void decode_rr_address(CPUTriCoreState *env, DisasContext *ctx)
4622 uint32_t op2, n;
4623 int r1, r2, r3;
4624 TCGv temp;
4626 op2 = MASK_OP_RR_OP2(ctx->opcode);
4627 r3 = MASK_OP_RR_D(ctx->opcode);
4628 r2 = MASK_OP_RR_S2(ctx->opcode);
4629 r1 = MASK_OP_RR_S1(ctx->opcode);
4630 n = MASK_OP_RR_N(ctx->opcode);
4632 switch (op2) {
4633 case OPC2_32_RR_ADD_A:
4634 tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
4635 break;
4636 case OPC2_32_RR_ADDSC_A:
4637 temp = tcg_temp_new();
4638 tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n);
4639 tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp);
4640 tcg_temp_free(temp);
4641 break;
4642 case OPC2_32_RR_ADDSC_AT:
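        /* ADDSC.AT: A[c] = (A[b] + (D[a] >> 3)) & ~0x3 */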
4643 temp = tcg_temp_new();
4644 tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3);
4645 tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp);
4646 tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC);
4647 tcg_temp_free(temp);
4648 break;
4649 case OPC2_32_RR_EQ_A:
4650 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1],
4651 cpu_gpr_a[r2]);
4652 break;
4653 case OPC2_32_RR_EQZ:
4654 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
4655 break;
4656 case OPC2_32_RR_GE_A:
4657 tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1],
4658 cpu_gpr_a[r2]);
4659 break;
4660 case OPC2_32_RR_LT_A:
4661 tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1],
4662 cpu_gpr_a[r2]);
4663 break;
4664 case OPC2_32_RR_MOV_A:
4665 tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_d[r2]);
4666 break;
4667 case OPC2_32_RR_MOV_AA:
4668 tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_a[r2]);
4669 break;
4670 case OPC2_32_RR_MOV_D:
4671 tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_a[r2]);
4672 break;
4673 case OPC2_32_RR_NE_A:
4674 tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1],
4675 cpu_gpr_a[r2]);
4676 break;
4677 case OPC2_32_RR_NEZ_A:
4678 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
4679 break;
4680 case OPC2_32_RR_SUB_A:
4681 tcg_gen_sub_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
4682 break;
4686 static void decode_rr_idirect(CPUTriCoreState *env, DisasContext *ctx)
4688 uint32_t op2;
4689 int r1;
4691 op2 = MASK_OP_RR_OP2(ctx->opcode);
4692 r1 = MASK_OP_RR_S1(ctx->opcode);
4694 switch (op2) {
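    /* JI jumps to A[a] with the LSB cleared; JLI additionally saves the
       return address in A[11]; CALLI saves the context via the call helper */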
4695 case OPC2_32_RR_JI:
4696 tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
4697 break;
4698 case OPC2_32_RR_JLI:
4699 tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
4700 tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
4701 break;
4702 case OPC2_32_RR_CALLI:
4703 gen_helper_1arg(call, ctx->next_pc);
4704 tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
4705 break;
4707 tcg_gen_exit_tb(0);
4708 ctx->bstate = BS_BRANCH;
4711 static void decode_rr_divide(CPUTriCoreState *env, DisasContext *ctx)
4713 uint32_t op2;
4714 int r1, r2, r3;
4716 TCGv temp, temp2;
4718 op2 = MASK_OP_RR_OP2(ctx->opcode);
4719 r3 = MASK_OP_RR_D(ctx->opcode);
4720 r2 = MASK_OP_RR_S2(ctx->opcode);
4721 r1 = MASK_OP_RR_S1(ctx->opcode);
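    /* BSPLIT and the DVINIT variants write a 64-bit result into the
       register pair D[c]/D[c+1] */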
4723 switch (op2) {
4724 case OPC2_32_RR_BMERGE:
4725 gen_helper_bmerge(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
4726 break;
4727 case OPC2_32_RR_BSPLIT:
4728 gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
4729 break;
4730 case OPC2_32_RR_DVINIT_B:
4731 gen_dvinit_b(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
4732 cpu_gpr_d[r2]);
4733 break;
4734 case OPC2_32_RR_DVINIT_BU:
4735 temp = tcg_temp_new();
4736 temp2 = tcg_temp_new();
4737 /* reset av */
4738 tcg_gen_movi_tl(cpu_PSW_AV, 0);
4739 if (!tricore_feature(env, TRICORE_FEATURE_131)) {
4740 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
4741 tcg_gen_neg_tl(temp, cpu_gpr_d[r3+1]);
4742 /* use cpu_PSW_AV to compare against 0 */
4743 tcg_gen_movcond_tl(TCG_COND_LT, temp, cpu_gpr_d[r3+1], cpu_PSW_AV,
4744 temp, cpu_gpr_d[r3+1]);
4745 tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]);
4746 tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV,
4747 temp2, cpu_gpr_d[r2]);
4748 tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
4749 } else {
4750 /* overflow = (D[b] == 0) */
4751 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
4753 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
4754 /* sv */
4755 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
4756 /* write result */
4757 tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 8);
4758 tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24);
4759 tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp);
4761 tcg_temp_free(temp);
4762 tcg_temp_free(temp2);
4763 break;
4764 case OPC2_32_RR_DVINIT_H:
4765 gen_dvinit_h(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
4766 cpu_gpr_d[r2]);
4767 break;
4768 case OPC2_32_RR_DVINIT_HU:
4769 temp = tcg_temp_new();
4770 temp2 = tcg_temp_new();
4771 /* reset av */
4772 tcg_gen_movi_tl(cpu_PSW_AV, 0);
4773 if (!tricore_feature(env, TRICORE_FEATURE_131)) {
4774 /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
4775 tcg_gen_neg_tl(temp, cpu_gpr_d[r3+1]);
4776 /* use cpu_PSW_AV to compare against 0 */
4777 tcg_gen_movcond_tl(TCG_COND_LT, temp, cpu_gpr_d[r3+1], cpu_PSW_AV,
4778 temp, cpu_gpr_d[r3+1]);
4779 tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]);
4780 tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV,
4781 temp2, cpu_gpr_d[r2]);
4782 tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
4783 } else {
4784 /* overflow = (D[b] == 0) */
4785 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
4787 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
4788 /* sv */
4789 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
4790 /* write result */
4791 tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
4792 tcg_gen_shri_tl(cpu_gpr_d[r3+1], temp, 16);
4793 tcg_gen_shli_tl(cpu_gpr_d[r3], temp, 16);
4794 tcg_temp_free(temp);
4795 tcg_temp_free(temp2);
4796 break;
4797 case OPC2_32_RR_DVINIT:
4798 temp = tcg_temp_new();
4799 temp2 = tcg_temp_new();
4800 /* overflow = ((D[b] == 0) ||
4801 ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
4802 tcg_gen_setcondi_tl(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff);
4803 tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000);
4804 tcg_gen_and_tl(temp, temp, temp2);
4805 tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0);
4806 tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
4807 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
4808 /* sv */
4809 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
4810 /* reset av */
4811 tcg_gen_movi_tl(cpu_PSW_AV, 0);
4812 /* write result */
4813 tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
4814 /* sign extend to high reg */
4815 tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31);
4816 tcg_temp_free(temp);
4817 tcg_temp_free(temp2);
4818 break;
4819 case OPC2_32_RR_DVINIT_U:
4820 /* overflow = (D[b] == 0) */
4821 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
4822 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
4823 /* sv */
4824 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
4825 /* reset av */
4826 tcg_gen_movi_tl(cpu_PSW_AV, 0);
4827 /* write result */
4828 tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
4829 /* zero extend to high reg */
4830 tcg_gen_movi_tl(cpu_gpr_d[r3+1], 0);
4831 break;
4832 case OPC2_32_RR_PARITY:
4833 gen_helper_parity(cpu_gpr_d[r3], cpu_gpr_d[r1]);
4834 break;
4835 case OPC2_32_RR_UNPACK:
4836 gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
4837 break;
4841 /* RR1 Format */
4842 static void decode_rr1_mul(CPUTriCoreState *env, DisasContext *ctx)
4844 uint32_t op2;
4846 int r1, r2, r3;
4847 TCGv n;
4848 TCGv_i64 temp64;
4850 r1 = MASK_OP_RR1_S1(ctx->opcode);
4851 r2 = MASK_OP_RR1_S2(ctx->opcode);
4852 r3 = MASK_OP_RR1_D(ctx->opcode);
4853 n = tcg_const_i32(MASK_OP_RR1_N(ctx->opcode));
4854 op2 = MASK_OP_RR1_OP2(ctx->opcode);
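    /* the LL/LU/UL/UU suffix selects which 16-bit halves of D[a] and D[b]
       the helper multiplies */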
4856 switch (op2) {
4857 case OPC2_32_RR1_MUL_H_32_LL:
4858 temp64 = tcg_temp_new_i64();
4859 GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4860 tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
4861 gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
4862 tcg_temp_free_i64(temp64);
4863 break;
4864 case OPC2_32_RR1_MUL_H_32_LU:
4865 temp64 = tcg_temp_new_i64();
4866 GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4867 tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
4868 gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
4869 tcg_temp_free_i64(temp64);
4870 break;
4871 case OPC2_32_RR1_MUL_H_32_UL:
4872 temp64 = tcg_temp_new_i64();
4873 GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4874 tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
4875 gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
4876 tcg_temp_free_i64(temp64);
4877 break;
4878 case OPC2_32_RR1_MUL_H_32_UU:
4879 temp64 = tcg_temp_new_i64();
4880 GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4881 tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
4882 gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
4883 tcg_temp_free_i64(temp64);
4884 break;
4885 case OPC2_32_RR1_MULM_H_64_LL:
4886 temp64 = tcg_temp_new_i64();
4887 GEN_HELPER_LL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4888 tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
4889 /* reset V bit */
4890 tcg_gen_movi_tl(cpu_PSW_V, 0);
4891 /* reset AV bit */
4892 tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
4893 tcg_temp_free_i64(temp64);
4894 break;
4895 case OPC2_32_RR1_MULM_H_64_LU:
4896 temp64 = tcg_temp_new_i64();
4897 GEN_HELPER_LU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4898 tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
4899 /* reset V bit */
4900 tcg_gen_movi_tl(cpu_PSW_V, 0);
4901 /* reset AV bit */
4902 tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
4903 tcg_temp_free_i64(temp64);
4904 break;
4905 case OPC2_32_RR1_MULM_H_64_UL:
4906 temp64 = tcg_temp_new_i64();
4907 GEN_HELPER_UL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4908 tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
4909 /* reset V bit */
4910 tcg_gen_movi_tl(cpu_PSW_V, 0);
4911 /* reset AV bit */
4912 tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
4913 tcg_temp_free_i64(temp64);
4914 break;
4915 case OPC2_32_RR1_MULM_H_64_UU:
4916 temp64 = tcg_temp_new_i64();
4917 GEN_HELPER_UU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4918 tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
4919 /* reset V bit */
4920 tcg_gen_movi_tl(cpu_PSW_V, 0);
4921 /* reset AV bit */
4922 tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
4923 tcg_temp_free_i64(temp64);
4925 break;
4926 case OPC2_32_RR1_MULR_H_16_LL:
4927 GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4928 gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
4929 break;
4930 case OPC2_32_RR1_MULR_H_16_LU:
4931 GEN_HELPER_LU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4932 gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
4933 break;
4934 case OPC2_32_RR1_MULR_H_16_UL:
4935 GEN_HELPER_UL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4936 gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
4937 break;
4938 case OPC2_32_RR1_MULR_H_16_UU:
4939 GEN_HELPER_UU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
4940 gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
4941 break;
4943 tcg_temp_free(n);
4946 static void decode_rr1_mulq(CPUTriCoreState *env, DisasContext *ctx)
4948 uint32_t op2;
4949 int r1, r2, r3;
4950 uint32_t n;
4952 TCGv temp, temp2;
4954 r1 = MASK_OP_RR1_S1(ctx->opcode);
4955 r2 = MASK_OP_RR1_S2(ctx->opcode);
4956 r3 = MASK_OP_RR1_D(ctx->opcode);
4957 n = MASK_OP_RR1_N(ctx->opcode);
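    /* per the ISA, n (0 or 1) selects an extra left shift applied to the
       Q-format product */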
4958 op2 = MASK_OP_RR1_OP2(ctx->opcode);
4960 temp = tcg_temp_new();
4961 temp2 = tcg_temp_new();
4963 switch (op2) {
4964 case OPC2_32_RR1_MUL_Q_32:
4965 gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2], n, 32);
4966 break;
4967 case OPC2_32_RR1_MUL_Q_64:
4968 gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
4969 n, 0);
4970 break;
4971 case OPC2_32_RR1_MUL_Q_32_L:
4972 tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
4973 gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
4974 break;
4975 case OPC2_32_RR1_MUL_Q_64_L:
4976 tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
4977 gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
4978 break;
4979 case OPC2_32_RR1_MUL_Q_32_U:
4980 tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
4981 gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
4982 break;
4983 case OPC2_32_RR1_MUL_Q_64_U:
4984 tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
4985 gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
4986 break;
4987 case OPC2_32_RR1_MUL_Q_32_LL:
4988 tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
4989 tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
4990 gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
4991 break;
4992 case OPC2_32_RR1_MUL_Q_32_UU:
4993 tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
4994 tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
4995 gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
4996 break;
4997 case OPC2_32_RR1_MULR_Q_32_L:
4998 tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
4999 tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
5000 gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
5001 break;
5002 case OPC2_32_RR1_MULR_Q_32_U:
5003 tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
5004 tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
5005 gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
5006 break;
5008 tcg_temp_free(temp);
5009 tcg_temp_free(temp2);
5012 /* RR2 format */
5013 static void decode_rr2_mul(CPUTriCoreState *env, DisasContext *ctx)
5015 uint32_t op2;
5016 int r1, r2, r3;
5018 op2 = MASK_OP_RR2_OP2(ctx->opcode);
5019 r1 = MASK_OP_RR2_S1(ctx->opcode);
5020 r2 = MASK_OP_RR2_S2(ctx->opcode);
5021 r3 = MASK_OP_RR2_D(ctx->opcode);
5022 switch (op2) {
5023 case OPC2_32_RR2_MUL_32:
5024 gen_mul_i32s(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
5025 break;
5026 case OPC2_32_RR2_MUL_64:
5027 gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
5028 cpu_gpr_d[r2]);
5029 break;
5030 case OPC2_32_RR2_MULS_32:
5031 gen_helper_mul_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
5032 cpu_gpr_d[r2]);
5033 break;
5034 case OPC2_32_RR2_MUL_U_64:
5035 gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
5036 cpu_gpr_d[r2]);
5037 break;
5038 case OPC2_32_RR2_MULS_U_32:
5039 gen_helper_mul_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
5040 cpu_gpr_d[r2]);
5041 break;
5045 /* RRPW format */
5046 static void decode_rrpw_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
5048 uint32_t op2;
5049 int r1, r2, r3;
5050 int32_t pos, width;
5052 op2 = MASK_OP_RRPW_OP2(ctx->opcode);
5053 r1 = MASK_OP_RRPW_S1(ctx->opcode);
5054 r2 = MASK_OP_RRPW_S2(ctx->opcode);
5055 r3 = MASK_OP_RRPW_D(ctx->opcode);
5056 pos = MASK_OP_RRPW_POS(ctx->opcode);
5057 width = MASK_OP_RRPW_WIDTH(ctx->opcode);
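    /* pos and width are 5-bit immediates; EXTR/EXTR.U extract a signed/
       unsigned bit-field from D[a], IMASK and INSERT build/deposit one */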
5059 switch (op2) {
5060 case OPC2_32_RRPW_EXTR:
5061 if (pos + width <= 31) {
5062 /* optimize special cases */
5063 if ((pos == 0) && (width == 8)) {
5064 tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
5065 } else if ((pos == 0) && (width == 16)) {
5066 tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
5067 } else {
5068 tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 32 - pos - width);
5069 tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width);
5072 break;
5073 case OPC2_32_RRPW_EXTR_U:
5074 if (width == 0) {
5075 tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
5076 } else {
5077 tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos);
5078 tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32-width));
5080 break;
5081 case OPC2_32_RRPW_IMASK:
5082 if (pos + width <= 31) {
5083 tcg_gen_movi_tl(cpu_gpr_d[r3+1], ((1u << width) - 1) << pos);
5084 tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
5086 break;
5087 case OPC2_32_RRPW_INSERT:
5088 if (pos + width <= 31) {
5089 tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
5090 pos, width);
5092 break;
5096 /* RRR format */
5097 static void decode_rrr_cond_select(CPUTriCoreState *env, DisasContext *ctx)
5099 uint32_t op2;
5100 int r1, r2, r3, r4;
5101 TCGv temp;
5103 op2 = MASK_OP_RRR_OP2(ctx->opcode);
5104 r1 = MASK_OP_RRR_S1(ctx->opcode);
5105 r2 = MASK_OP_RRR_S2(ctx->opcode);
5106 r3 = MASK_OP_RRR_S3(ctx->opcode);
5107 r4 = MASK_OP_RRR_D(ctx->opcode);
5109 switch (op2) {
5110 case OPC2_32_RRR_CADD:
5111 gen_cond_add(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
5112 cpu_gpr_d[r4], cpu_gpr_d[r3]);
5113 break;
5114 case OPC2_32_RRR_CADDN:
5115 gen_cond_add(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
5116 cpu_gpr_d[r3]);
5117 break;
5118 case OPC2_32_RRR_CSUB:
5119 gen_cond_sub(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
5120 cpu_gpr_d[r3]);
5121 break;
5122 case OPC2_32_RRR_CSUBN:
5123 gen_cond_sub(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
5124 cpu_gpr_d[r3]);
5125 break;
5126 case OPC2_32_RRR_SEL:
5127 temp = tcg_const_i32(0);
5128 tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
5129 cpu_gpr_d[r1], cpu_gpr_d[r2]);
5130 tcg_temp_free(temp);
5131 break;
5132 case OPC2_32_RRR_SELN:
5133 temp = tcg_const_i32(0);
5134 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
5135 cpu_gpr_d[r1], cpu_gpr_d[r2]);
5136 tcg_temp_free(temp);
5137 break;
5141 static void decode_rrr_divide(CPUTriCoreState *env, DisasContext *ctx)
5143 uint32_t op2;
5145 int r1, r2, r3, r4;
5147 op2 = MASK_OP_RRR_OP2(ctx->opcode);
5148 r1 = MASK_OP_RRR_S1(ctx->opcode);
5149 r2 = MASK_OP_RRR_S2(ctx->opcode);
5150 r3 = MASK_OP_RRR_S3(ctx->opcode);
5151 r4 = MASK_OP_RRR_D(ctx->opcode);
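    /* DVADJ and DVSTEP(.U) operate on the dividend/quotient pair E[d] and
       produce E[c]; each DVSTEP generates 8 quotient bits */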
5153 switch (op2) {
5154 case OPC2_32_RRR_DVADJ:
5155 GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
5156 cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
5157 break;
5158 case OPC2_32_RRR_DVSTEP:
5159 GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
5160 cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
5161 break;
5162 case OPC2_32_RRR_DVSTEP_U:
5163 GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
5164 cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
5165 break;
5166 case OPC2_32_RRR_IXMAX:
5167 GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
5168 cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
5169 break;
5170 case OPC2_32_RRR_IXMAX_U:
5171 GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
5172 cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
5173 break;
5174 case OPC2_32_RRR_IXMIN:
5175 GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
5176 cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
5177 break;
5178 case OPC2_32_RRR_IXMIN_U:
5179 GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
5180 cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
5181 break;
5182 case OPC2_32_RRR_PACK:
5183 gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3],
5184 cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
5185 break;
5189 static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
5191 int op1;
5192 int32_t r1, r2, r3;
5193 int32_t address, const16;
5194 int8_t b, const4;
5195 int32_t bpos;
5196 TCGv temp, temp2, temp3;
5198 op1 = MASK_OP_MAJOR(ctx->opcode);
5200 /* handle JNZ.T opcode only being 7 bits long */
5201 if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
5202 op1 = OPCM_32_BRN_JTT;
5205 switch (op1) {
5206 /* ABS-format */
5207 case OPCM_32_ABS_LDW:
5208 decode_abs_ldw(env, ctx);
5209 break;
5210 case OPCM_32_ABS_LDB:
5211 decode_abs_ldb(env, ctx);
5212 break;
5213 case OPCM_32_ABS_LDMST_SWAP:
5214 decode_abs_ldst_swap(env, ctx);
5215 break;
5216 case OPCM_32_ABS_LDST_CONTEXT:
5217 decode_abs_ldst_context(env, ctx);
5218 break;
5219 case OPCM_32_ABS_STORE:
5220 decode_abs_store(env, ctx);
5221 break;
5222 case OPCM_32_ABS_STOREB_H:
5223 decode_abs_storeb_h(env, ctx);
5224 break;
5225 case OPC1_32_ABS_STOREQ:
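    /* ST.Q stores the upper half-word (bits 31:16) of D[a] */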
5226 address = MASK_OP_ABS_OFF18(ctx->opcode);
5227 r1 = MASK_OP_ABS_S1D(ctx->opcode);
5228 temp = tcg_const_i32(EA_ABS_FORMAT(address));
5229 temp2 = tcg_temp_new();
5231 tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
5232 tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);
5234 tcg_temp_free(temp2);
5235 tcg_temp_free(temp);
5236 break;
5237 case OPC1_32_ABS_LD_Q:
5238 address = MASK_OP_ABS_OFF18(ctx->opcode);
5239 r1 = MASK_OP_ABS_S1D(ctx->opcode);
5240 temp = tcg_const_i32(EA_ABS_FORMAT(address));
5242 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
5243 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
5245 tcg_temp_free(temp);
5246 break;
5247 case OPC1_32_ABS_LEA:
5248 address = MASK_OP_ABS_OFF18(ctx->opcode);
5249 r1 = MASK_OP_ABS_S1D(ctx->opcode);
5250 tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
5251 break;
5252 /* ABSB-format */
5253 case OPC1_32_ABSB_ST_T:
5254 address = MASK_OP_ABS_OFF18(ctx->opcode);
5255 b = MASK_OP_ABSB_B(ctx->opcode);
5256 bpos = MASK_OP_ABSB_BPOS(ctx->opcode);
5258 temp = tcg_const_i32(EA_ABS_FORMAT(address));
5259 temp2 = tcg_temp_new();
5261 tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
5262 tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
5263 tcg_gen_ori_tl(temp2, temp2, (b << bpos));
5264 tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);
5266 tcg_temp_free(temp);
5267 tcg_temp_free(temp2);
5268 break;
5269 /* B-format */
5270 case OPC1_32_B_CALL:
5271 case OPC1_32_B_CALLA:
5272 case OPC1_32_B_J:
5273 case OPC1_32_B_JA:
5274 case OPC1_32_B_JL:
5275 case OPC1_32_B_JLA:
5276 address = MASK_OP_B_DISP24_SEXT(ctx->opcode);
5277 gen_compute_branch(ctx, op1, 0, 0, 0, address);
5278 break;
5279 /* Bit-format */
5280 case OPCM_32_BIT_ANDACC:
5281 decode_bit_andacc(env, ctx);
5282 break;
5283 case OPCM_32_BIT_LOGICAL_T1:
5284 decode_bit_logical_t(env, ctx);
5285 break;
5286 case OPCM_32_BIT_INSERT:
5287 decode_bit_insert(env, ctx);
5288 break;
5289 case OPCM_32_BIT_LOGICAL_T2:
5290 decode_bit_logical_t2(env, ctx);
5291 break;
5292 case OPCM_32_BIT_ORAND:
5293 decode_bit_orand(env, ctx);
5294 break;
5295 case OPCM_32_BIT_SH_LOGIC1:
5296 decode_bit_sh_logic1(env, ctx);
5297 break;
5298 case OPCM_32_BIT_SH_LOGIC2:
5299 decode_bit_sh_logic2(env, ctx);
5300 break;
5301 /* BO Format */
5302 case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
5303 decode_bo_addrmode_post_pre_base(env, ctx);
5304 break;
5305 case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
5306 decode_bo_addrmode_bitreverse_circular(env, ctx);
5307 break;
5308 case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
5309 decode_bo_addrmode_ld_post_pre_base(env, ctx);
5310 break;
5311 case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
5312 decode_bo_addrmode_ld_bitreverse_circular(env, ctx);
5313 break;
5314 case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
5315 decode_bo_addrmode_stctx_post_pre_base(env, ctx);
5316 break;
5317 case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
5318 decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx);
5319 break;
5320 /* BOL-format */
5321 case OPC1_32_BOL_LD_A_LONGOFF:
5322 case OPC1_32_BOL_LD_W_LONGOFF:
5323 case OPC1_32_BOL_LEA_LONGOFF:
5324 case OPC1_32_BOL_ST_W_LONGOFF:
5325 case OPC1_32_BOL_ST_A_LONGOFF:
5326 case OPC1_32_BOL_LD_B_LONGOFF:
5327 case OPC1_32_BOL_LD_BU_LONGOFF:
5328 case OPC1_32_BOL_LD_H_LONGOFF:
5329 case OPC1_32_BOL_LD_HU_LONGOFF:
5330 case OPC1_32_BOL_ST_B_LONGOFF:
5331 case OPC1_32_BOL_ST_H_LONGOFF:
5332 decode_bol_opc(env, ctx, op1);
5333 break;
5334 /* BRC Format */
5335 case OPCM_32_BRC_EQ_NEQ:
5336 case OPCM_32_BRC_GE:
5337 case OPCM_32_BRC_JLT:
5338 case OPCM_32_BRC_JNE:
5339 const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
5340 address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
5341 r1 = MASK_OP_BRC_S1(ctx->opcode);
5342 gen_compute_branch(ctx, op1, r1, 0, const4, address);
5343 break;
5344 /* BRN Format */
5345 case OPCM_32_BRN_JTT:
5346 address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
5347 r1 = MASK_OP_BRN_S1(ctx->opcode);
5348 gen_compute_branch(ctx, op1, r1, 0, 0, address);
5349 break;
5350 /* BRR Format */
5351 case OPCM_32_BRR_EQ_NEQ:
5352 case OPCM_32_BRR_ADDR_EQ_NEQ:
5353 case OPCM_32_BRR_GE:
5354 case OPCM_32_BRR_JLT:
5355 case OPCM_32_BRR_JNE:
5356 case OPCM_32_BRR_JNZ:
5357 case OPCM_32_BRR_LOOP:
5358 address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
5359 r2 = MASK_OP_BRR_S2(ctx->opcode);
5360 r1 = MASK_OP_BRR_S1(ctx->opcode);
5361 gen_compute_branch(ctx, op1, r1, r2, 0, address);
5362 break;
5363 /* RC Format */
5364 case OPCM_32_RC_LOGICAL_SHIFT:
5365 decode_rc_logical_shift(env, ctx);
5366 break;
5367 case OPCM_32_RC_ACCUMULATOR:
5368 decode_rc_accumulator(env, ctx);
5369 break;
5370 case OPCM_32_RC_SERVICEROUTINE:
5371 decode_rc_serviceroutine(env, ctx);
5372 break;
5373 case OPCM_32_RC_MUL:
5374 decode_rc_mul(env, ctx);
5375 break;
5376 /* RCPW Format */
5377 case OPCM_32_RCPW_MASK_INSERT:
5378 decode_rcpw_insert(env, ctx);
5379 break;
5380 /* RCRR Format */
5381 case OPC1_32_RCRR_INSERT:
5382 r1 = MASK_OP_RCRR_S1(ctx->opcode);
5383 r2 = MASK_OP_RCRR_S3(ctx->opcode);
5384 r3 = MASK_OP_RCRR_D(ctx->opcode);
5385 const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
5386 temp = tcg_const_i32(const16);
5387 temp2 = tcg_temp_new(); /* width */
5388 temp3 = tcg_temp_new(); /* pos */
5390 tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
5391 tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);
5393 gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);
5395 tcg_temp_free(temp);
5396 tcg_temp_free(temp2);
5397 tcg_temp_free(temp3);
5398 break;
5399 /* RCRW Format */
5400 case OPCM_32_RCRW_MASK_INSERT:
5401 decode_rcrw_insert(env, ctx);
5402 break;
5403 /* RCR Format */
5404 case OPCM_32_RCR_COND_SELECT:
5405 decode_rcr_cond_select(env, ctx);
5406 break;
5407 case OPCM_32_RCR_MADD:
5408 decode_rcr_madd(env, ctx);
5409 break;
5410 case OPCM_32_RCR_MSUB:
5411 decode_rcr_msub(env, ctx);
5412 break;
5413 /* RLC Format */
5414 case OPC1_32_RLC_ADDI:
5415 case OPC1_32_RLC_ADDIH:
5416 case OPC1_32_RLC_ADDIH_A:
5417 case OPC1_32_RLC_MFCR:
5418 case OPC1_32_RLC_MOV:
5419 case OPC1_32_RLC_MOV_64:
5420 case OPC1_32_RLC_MOV_U:
5421 case OPC1_32_RLC_MOV_H:
5422 case OPC1_32_RLC_MOVH_A:
5423 case OPC1_32_RLC_MTCR:
5424 decode_rlc_opc(env, ctx, op1);
5425 break;
5426 /* RR Format */
5427 case OPCM_32_RR_ACCUMULATOR:
5428 decode_rr_accumulator(env, ctx);
5429 break;
5430 case OPCM_32_RR_LOGICAL_SHIFT:
5431 decode_rr_logical_shift(env, ctx);
5432 break;
5433 case OPCM_32_RR_ADDRESS:
5434 decode_rr_address(env, ctx);
5435 break;
5436 case OPCM_32_RR_IDIRECT:
5437 decode_rr_idirect(env, ctx);
5438 break;
5439 case OPCM_32_RR_DIVIDE:
5440 decode_rr_divide(env, ctx);
5441 break;
5442 /* RR1 Format */
5443 case OPCM_32_RR1_MUL:
5444 decode_rr1_mul(env, ctx);
5445 break;
5446 case OPCM_32_RR1_MULQ:
5447 decode_rr1_mulq(env, ctx);
5448 break;
5449 /* RR2 format */
5450 case OPCM_32_RR2_MUL:
5451 decode_rr2_mul(env, ctx);
5452 break;
5453 /* RRPW format */
5454 case OPCM_32_RRPW_EXTRACT_INSERT:
5455 decode_rrpw_extract_insert(env, ctx);
5456 break;
5457 case OPC1_32_RRPW_DEXTR:
5458 r1 = MASK_OP_RRPW_S1(ctx->opcode);
5459 r2 = MASK_OP_RRPW_S2(ctx->opcode);
5460 r3 = MASK_OP_RRPW_D(ctx->opcode);
5461 const16 = MASK_OP_RRPW_POS(ctx->opcode);
5462 if (r1 == r2) {
5463 tcg_gen_rotli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], const16);
5464 } else {
5465 temp = tcg_temp_new();
5466 tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], const16);
5467 tcg_gen_shri_tl(temp, cpu_gpr_d[r2], 32 - const16);
5468 tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
5469 tcg_temp_free(temp);
5471 break;
5472 /* RRR Format */
5473 case OPCM_32_RRR_COND_SELECT:
5474 decode_rrr_cond_select(env, ctx);
5475 break;
5476 case OPCM_32_RRR_DIVIDE:
5477 decode_rrr_divide(env, ctx);
5481 static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
5483 /* 16-Bit Instruction */
5484 if ((ctx->opcode & 0x1) == 0) {
5485 ctx->next_pc = ctx->pc + 2;
5486 decode_16Bit_opc(env, ctx);
5487 /* 32-Bit Instruction */
5488 } else {
5489 ctx->next_pc = ctx->pc + 4;
5490 decode_32Bit_opc(env, ctx);
5494 static inline void
5495 gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
5496 int search_pc)
5498 CPUState *cs = CPU(cpu);
5499 CPUTriCoreState *env = &cpu->env;
5500 DisasContext ctx;
5501 target_ulong pc_start;
5502 int num_insns;
5504 if (search_pc) {
5505 qemu_log("search pc %d\n", search_pc);
5508 num_insns = 0;
5509 pc_start = tb->pc;
5510 ctx.pc = pc_start;
5511 ctx.saved_pc = -1;
5512 ctx.tb = tb;
5513 ctx.singlestep_enabled = cs->singlestep_enabled;
5514 ctx.bstate = BS_NONE;
5515 ctx.mem_idx = cpu_mmu_index(env);
5517 tcg_clear_temp_count();
5518 gen_tb_start(tb);
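    /* translate until the decoder signals a branch/exception via bstate,
       the op buffer fills up, or we are single-stepping */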
5519 while (ctx.bstate == BS_NONE) {
5520 ctx.opcode = cpu_ldl_code(env, ctx.pc);
5521 decode_opc(env, &ctx, 0);
5523 num_insns++;
5525 if (tcg_op_buf_full()) {
5526 gen_save_pc(ctx.next_pc);
5527 tcg_gen_exit_tb(0);
5528 break;
5530 if (singlestep) {
5531 gen_save_pc(ctx.next_pc);
5532 tcg_gen_exit_tb(0);
5533 break;
5535 ctx.pc = ctx.next_pc;
5538 gen_tb_end(tb, num_insns);
5539 if (search_pc) {
5540 printf("done_generating search pc\n");
5541 } else {
5542 tb->size = ctx.pc - pc_start;
5543 tb->icount = num_insns;
5545 if (tcg_check_temp_count()) {
5546 printf("LEAK at %08x\n", env->PC);
5549 #ifdef DEBUG_DISAS
5550 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
5551 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5552 log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
5553 qemu_log("\n");
5555 #endif
5558 void
5559 gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
5561 gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
5564 void
5565 gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
5567 gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
5570 void
5571 restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos)
5573 env->PC = tcg_ctx.gen_opc_pc[pc_pos];
5577 * Initialization
5581 void cpu_state_reset(CPUTriCoreState *env)
5583 /* Reset Regs to Default Value */
5584 env->PSW = 0xb80;
5587 static void tricore_tcg_init_csfr(void)
5589 cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
5590 offsetof(CPUTriCoreState, PCXI), "PCXI");
5591 cpu_PSW = tcg_global_mem_new(TCG_AREG0,
5592 offsetof(CPUTriCoreState, PSW), "PSW");
5593 cpu_PC = tcg_global_mem_new(TCG_AREG0,
5594 offsetof(CPUTriCoreState, PC), "PC");
5595 cpu_ICR = tcg_global_mem_new(TCG_AREG0,
5596 offsetof(CPUTriCoreState, ICR), "ICR");
5599 void tricore_tcg_init(void)
5601 int i;
5602 static int inited;
5603 if (inited) {
5604 return;
5606 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
5607 /* reg init */
5608 for (i = 0 ; i < 16 ; i++) {
5609 cpu_gpr_a[i] = tcg_global_mem_new(TCG_AREG0,
5610 offsetof(CPUTriCoreState, gpr_a[i]),
5611 regnames_a[i]);
5613 for (i = 0 ; i < 16 ; i++) {
5614 cpu_gpr_d[i] = tcg_global_mem_new(TCG_AREG0,
5615 offsetof(CPUTriCoreState, gpr_d[i]),
5616 regnames_d[i]);
5618 tricore_tcg_init_csfr();
5619 /* init PSW flag cache */
5620 cpu_PSW_C = tcg_global_mem_new(TCG_AREG0,
5621 offsetof(CPUTriCoreState, PSW_USB_C),
5622 "PSW_C");
5623 cpu_PSW_V = tcg_global_mem_new(TCG_AREG0,
5624 offsetof(CPUTriCoreState, PSW_USB_V),
5625 "PSW_V");
5626 cpu_PSW_SV = tcg_global_mem_new(TCG_AREG0,
5627 offsetof(CPUTriCoreState, PSW_USB_SV),
5628 "PSW_SV");
5629 cpu_PSW_AV = tcg_global_mem_new(TCG_AREG0,
5630 offsetof(CPUTriCoreState, PSW_USB_AV),
5631 "PSW_AV");
5632 cpu_PSW_SAV = tcg_global_mem_new(TCG_AREG0,
5633 offsetof(CPUTriCoreState, PSW_USB_SAV),
5634 "PSW_SAV");