target-tricore/translate.c
1 /*
2 * TriCore emulation for qemu: main translation routines.
4 * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "tcg-op.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "tricore-opcodes.h"
32 * TCG registers
34 static TCGv cpu_PC;
35 static TCGv cpu_PCXI;
36 static TCGv cpu_PSW;
37 static TCGv cpu_ICR;
38 /* GPR registers */
39 static TCGv cpu_gpr_a[16];
40 static TCGv cpu_gpr_d[16];
41 /* PSW Flag cache */
42 static TCGv cpu_PSW_C;
43 static TCGv cpu_PSW_V;
44 static TCGv cpu_PSW_SV;
45 static TCGv cpu_PSW_AV;
46 static TCGv cpu_PSW_SAV;
47 /* CPU env */
48 static TCGv_ptr cpu_env;
50 #include "exec/gen-icount.h"
52 static const char *regnames_a[] = {
53 "a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
54 "a6" , "a7" , "a8" , "a9" , "sp" , "a11" ,
55 "a12" , "a13" , "a14" , "a15",
58 static const char *regnames_d[] = {
59 "d0" , "d1" , "d2" , "d3" , "d4" , "d5" ,
60 "d6" , "d7" , "d8" , "d9" , "d10" , "d11" ,
61 "d12" , "d13" , "d14" , "d15",
64 typedef struct DisasContext {
65 struct TranslationBlock *tb;
66 target_ulong pc, saved_pc, next_pc;
67 uint32_t opcode;
68 int singlestep_enabled;
 69     /* MMU index used for memory accesses */
70 int mem_idx;
71 uint32_t hflags, saved_hflags;
72 int bstate;
73 } DisasContext;
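/* Reading aid for the bstate values below; the meanings are not spelled out in
   this file, the summary follows how they are used here and the analogous enum
   in other QEMU targets:
   BS_NONE   - nothing special, translation may continue
   BS_STOP   - stop translating after the current instruction
   BS_BRANCH - a branch has already updated the PC and will exit the TB
   BS_EXCP   - an exception has been generated */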
75 enum {
77 BS_NONE = 0,
78 BS_STOP = 1,
79 BS_BRANCH = 2,
80 BS_EXCP = 3,
83 void tricore_cpu_dump_state(CPUState *cs, FILE *f,
84 fprintf_function cpu_fprintf, int flags)
86 TriCoreCPU *cpu = TRICORE_CPU(cs);
87 CPUTriCoreState *env = &cpu->env;
88 int i;
90 cpu_fprintf(f, "PC=%08x\n", env->PC);
91 for (i = 0; i < 16; ++i) {
92 if ((i & 3) == 0) {
93 cpu_fprintf(f, "GPR A%02d:", i);
95 cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames_a[i], env->gpr_a[i]);
97 for (i = 0; i < 16; ++i) {
98 if ((i & 3) == 0) {
99 cpu_fprintf(f, "GPR D%02d:", i);
101 cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames_d[i], env->gpr_d[i]);
107 * Functions to generate micro-ops
110 /* Macros for generating helpers */
112 #define gen_helper_1arg(name, arg) do { \
113 TCGv_i32 helper_tmp = tcg_const_i32(arg); \
114 gen_helper_##name(cpu_env, helper_tmp); \
115 tcg_temp_free_i32(helper_tmp); \
116 } while (0)
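/* Illustrative expansion: gen_helper_1arg(call, ctx->next_pc) materializes the
   immediate in a temporary TCGv_i32, emits gen_helper_call(cpu_env, helper_tmp)
   and frees the temporary again, so helpers taking cpu_env plus one constant
   argument can be invoked with a plain integer. */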
118 #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
119 #define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
120 ((offset & 0x0fffff) << 1))
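/* Reading aid (derived from the bit arithmetic above, not stated in this file):
   EA_ABS_FORMAT maps an 18-bit offset to {off18[17:14], 14'b0, off18[13:0]},
   and EA_B_ABSOLUT maps a 24-bit displacement to
   {disp24[23:20], 7'b0, disp24[19:0], 1'b0}, matching the TriCore ABS and
   B-format effective-address formation. */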
122 /* Functions for load/save to/from memory */
124 static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
125 int16_t con, TCGMemOp mop)
127 TCGv temp = tcg_temp_new();
128 tcg_gen_addi_tl(temp, r2, con);
129 tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
130 tcg_temp_free(temp);
133 static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
134 int16_t con, TCGMemOp mop)
136 TCGv temp = tcg_temp_new();
137 tcg_gen_addi_tl(temp, r2, con);
138 tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
139 tcg_temp_free(temp);
142 static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
144 TCGv_i64 temp = tcg_temp_new_i64();
146 tcg_gen_concat_i32_i64(temp, rl, rh);
147 tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);
149 tcg_temp_free_i64(temp);
152 static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
153 DisasContext *ctx)
155 TCGv temp = tcg_temp_new();
156 tcg_gen_addi_tl(temp, base, con);
157 gen_st_2regs_64(rh, rl, temp, ctx);
158 tcg_temp_free(temp);
161 static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
163 TCGv_i64 temp = tcg_temp_new_i64();
165 tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
166 /* write back to two 32 bit regs */
167 tcg_gen_extr_i64_i32(rl, rh, temp);
169 tcg_temp_free_i64(temp);
172 static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
173 DisasContext *ctx)
175 TCGv temp = tcg_temp_new();
176 tcg_gen_addi_tl(temp, base, con);
177 gen_ld_2regs_64(rh, rl, temp, ctx);
178 tcg_temp_free(temp);
181 static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
182 TCGMemOp mop)
184 TCGv temp = tcg_temp_new();
185 tcg_gen_addi_tl(temp, r2, off);
186 tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
187 tcg_gen_mov_tl(r2, temp);
188 tcg_temp_free(temp);
191 static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
192 TCGMemOp mop)
194 TCGv temp = tcg_temp_new();
195 tcg_gen_addi_tl(temp, r2, off);
196 tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
197 tcg_gen_mov_tl(r2, temp);
198 tcg_temp_free(temp);
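/* Note (reading aid): both _preincr helpers write the computed effective
   address back into the base register r2, i.e. they model the TriCore
   pre-increment addressing mode in which A[b] is updated to A[b] + off and the
   updated value is used as the access address. */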
201 /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
202 static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
204 TCGv temp = tcg_temp_new();
205 TCGv temp2 = tcg_temp_new();
207     /* temp = M(EA, word) */
208 tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
209     /* temp = temp & ~E[a][63:32] */
210 tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
211 /* temp2 = (E[a][31:0] & E[a][63:32]); */
212 tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
213 /* temp = temp | temp2; */
214 tcg_gen_or_tl(temp, temp, temp2);
215 /* M(EA, word) = temp; */
216 tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);
218 tcg_temp_free(temp);
219 tcg_temp_free(temp2);
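/* Worked example (illustrative values): with E[a] = {data = 0x00001200,
   mask = 0x0000ff00} the sequence above loads the word at EA, clears the bits
   selected by the mask, ORs in 0x1200 and stores the word back, so only byte 1
   of the memory word is replaced. */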
222 /* tmp = M(EA, word);
223 M(EA, word) = D[a];
224 D[a] = tmp[31:0];*/
225 static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
227 TCGv temp = tcg_temp_new();
229 tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
230 tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
231 tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
233 tcg_temp_free(temp);
236 /* We generate loads from and stores to the core special function registers (csfr)
237 through the functions gen_mfcr and gen_mtcr. To handle access permissions, we use the
238 three macros R, A and E, which allow read-only, all and endinit-protected access.
239 These macros also specify in which ISA version the csfr was introduced. */
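/* For reference, a csfr.def entry is expected to look like
   A(0xfe00, PCXI, TRICORE_FEATURE_13) (example only, not copied from csfr.def),
   which expands here to a "case 0xfe00:" that loads or stores env->PCXI when the
   corresponding ISA feature is available. */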
240 #define R(ADDRESS, REG, FEATURE) \
241 case ADDRESS: \
242 if (tricore_feature(env, FEATURE)) { \
243 tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
245 break;
246 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
247 #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
248 static inline void gen_mfcr(CPUTriCoreState *env, TCGv ret, int32_t offset)
250 /* since we're caching PSW make this a special case */
251 if (offset == 0xfe04) {
252 gen_helper_psw_read(ret, cpu_env);
253 } else {
254 switch (offset) {
255 #include "csfr.def"
259 #undef R
260 #undef A
261 #undef E
263 #define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg,
264                                     since no exception occurs */
265 #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
266 case ADDRESS: \
267 if (tricore_feature(env, FEATURE)) { \
268 tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
270 break;
271 /* Endinit protected registers
272 TODO: Since the endinit bit is in a register of the watchdog device, which is
273 not implemented yet, we handle endinit protected registers like
274 all-access registers for now. */
275 #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
276 static inline void gen_mtcr(CPUTriCoreState *env, DisasContext *ctx, TCGv r1,
277 int32_t offset)
279 if (ctx->hflags & TRICORE_HFLAG_SM) {
280 /* since we're caching PSW make this a special case */
281 if (offset == 0xfe04) {
282 gen_helper_psw_write(cpu_env, r1);
283 } else {
284 switch (offset) {
285 #include "csfr.def"
288 } else {
289 /* generate privilege trap */
293 /* Functions for arithmetic instructions */
295 static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
297 TCGv t0 = tcg_temp_new_i32();
298 TCGv result = tcg_temp_new_i32();
299 /* Addition and set V/SV bits */
300 tcg_gen_add_tl(result, r1, r2);
301 /* calc V bit */
302 tcg_gen_xor_tl(cpu_PSW_V, result, r1);
303 tcg_gen_xor_tl(t0, r1, r2);
304 tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
305 /* Calc SV bit */
306 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
307 /* Calc AV/SAV bits */
308 tcg_gen_add_tl(cpu_PSW_AV, result, result);
309 tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
310 /* calc SAV */
311 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
312 /* write back result */
313 tcg_gen_mov_tl(ret, result);
315 tcg_temp_free(result);
316 tcg_temp_free(t0);
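/* The PSW bookkeeping above is the pattern used throughout this file: V is the
   sign bit of (result ^ r1) & ~(r1 ^ r2), i.e. set when both operands share a
   sign that the result does not; SV and SAV are sticky ORs of V and AV; AV is
   result[31] ^ result[30], computed as (result + result) ^ result; all cached
   flag registers carry their flag in bit 31. */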
319 /* ret = r2 + (r1 * r3); */
320 static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
322 TCGv_i64 t1 = tcg_temp_new_i64();
323 TCGv_i64 t2 = tcg_temp_new_i64();
324 TCGv_i64 t3 = tcg_temp_new_i64();
326 tcg_gen_ext_i32_i64(t1, r1);
327 tcg_gen_ext_i32_i64(t2, r2);
328 tcg_gen_ext_i32_i64(t3, r3);
330 tcg_gen_mul_i64(t1, t1, t3);
331 tcg_gen_add_i64(t1, t2, t1);
333 tcg_gen_trunc_i64_i32(ret, t1);
334 /* calc V
335 t1 > 0x7fffffff */
336 tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
337 /* t1 < -0x80000000 */
338 tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
339 tcg_gen_or_i64(t2, t2, t3);
340 tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
341 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
342 /* Calc SV bit */
343 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
344 /* Calc AV/SAV bits */
345 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
346 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
347 /* calc SAV */
348 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
350 tcg_temp_free_i64(t1);
351 tcg_temp_free_i64(t2);
352 tcg_temp_free_i64(t3);
355 static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
357 TCGv temp = tcg_const_i32(con);
358 gen_madd32_d(ret, r1, r2, temp);
359 tcg_temp_free(temp);
362 static inline void
363 gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
364 TCGv r3)
366 TCGv t1 = tcg_temp_new();
367 TCGv t2 = tcg_temp_new();
368 TCGv t3 = tcg_temp_new();
369 TCGv t4 = tcg_temp_new();
371 tcg_gen_muls2_tl(t1, t2, r1, r3);
372 /* only the add can overflow */
373 tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
374 /* calc V bit */
375 tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
376 tcg_gen_xor_tl(t1, r2_high, t2);
377 tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
378 /* Calc SV bit */
379 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
380 /* Calc AV/SAV bits */
381 tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
382 tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
383 /* calc SAV */
384 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
385 /* write back the result */
386 tcg_gen_mov_tl(ret_low, t3);
387 tcg_gen_mov_tl(ret_high, t4);
389 tcg_temp_free(t1);
390 tcg_temp_free(t2);
391 tcg_temp_free(t3);
392 tcg_temp_free(t4);
395 static inline void
396 gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
397 TCGv r3)
399 TCGv_i64 t1 = tcg_temp_new_i64();
400 TCGv_i64 t2 = tcg_temp_new_i64();
401 TCGv_i64 t3 = tcg_temp_new_i64();
403 tcg_gen_extu_i32_i64(t1, r1);
404 tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
405 tcg_gen_extu_i32_i64(t3, r3);
407 tcg_gen_mul_i64(t1, t1, t3);
408 tcg_gen_add_i64(t2, t2, t1);
409 /* write back result */
410 tcg_gen_extr_i64_i32(ret_low, ret_high, t2);
411     /* only the add can overflow; it does so iff the 64-bit result t2 is (unsigned) less than t1.
412        calc V bit */
413 tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
414 tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
415 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
416 /* Calc SV bit */
417 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
418 /* Calc AV/SAV bits */
419 tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
420 tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
421 /* calc SAV */
422 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
424 tcg_temp_free_i64(t1);
425 tcg_temp_free_i64(t2);
426 tcg_temp_free_i64(t3);
429 static inline void
430 gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
431 int32_t con)
433 TCGv temp = tcg_const_i32(con);
434 gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
435 tcg_temp_free(temp);
438 static inline void
439 gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
440 int32_t con)
442 TCGv temp = tcg_const_i32(con);
443 gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
444 tcg_temp_free(temp);
447 /* ret = r2 - (r1 * r3); */
448 static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
450 TCGv_i64 t1 = tcg_temp_new_i64();
451 TCGv_i64 t2 = tcg_temp_new_i64();
452 TCGv_i64 t3 = tcg_temp_new_i64();
454 tcg_gen_ext_i32_i64(t1, r1);
455 tcg_gen_ext_i32_i64(t2, r2);
456 tcg_gen_ext_i32_i64(t3, r3);
458 tcg_gen_mul_i64(t1, t1, t3);
459 tcg_gen_sub_i64(t1, t2, t1);
461 tcg_gen_trunc_i64_i32(ret, t1);
462 /* calc V
463        result > 0x7fffffff */
464 tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
465 /* result < -0x80000000 */
466 tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
467 tcg_gen_or_i64(t2, t2, t3);
468 tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
469 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
471 /* Calc SV bit */
472 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
473 /* Calc AV/SAV bits */
474 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
475 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
476 /* calc SAV */
477 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
479 tcg_temp_free_i64(t1);
480 tcg_temp_free_i64(t2);
481 tcg_temp_free_i64(t3);
484 static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
486 TCGv temp = tcg_const_i32(con);
487 gen_msub32_d(ret, r1, r2, temp);
488 tcg_temp_free(temp);
491 static inline void
492 gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
493 TCGv r3)
495 TCGv t1 = tcg_temp_new();
496 TCGv t2 = tcg_temp_new();
497 TCGv t3 = tcg_temp_new();
498 TCGv t4 = tcg_temp_new();
500 tcg_gen_muls2_tl(t1, t2, r1, r3);
501 /* only the sub can overflow */
502 tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
503 /* calc V bit */
504 tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
505 tcg_gen_xor_tl(t1, r2_high, t2);
506 tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
507 /* Calc SV bit */
508 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
509 /* Calc AV/SAV bits */
510 tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
511 tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
512 /* calc SAV */
513 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
514 /* write back the result */
515 tcg_gen_mov_tl(ret_low, t3);
516 tcg_gen_mov_tl(ret_high, t4);
518 tcg_temp_free(t1);
519 tcg_temp_free(t2);
520 tcg_temp_free(t3);
521 tcg_temp_free(t4);
524 static inline void
525 gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
526 int32_t con)
528 TCGv temp = tcg_const_i32(con);
529 gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
530 tcg_temp_free(temp);
533 static inline void
534 gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
535 TCGv r3)
537 TCGv_i64 t1 = tcg_temp_new_i64();
538 TCGv_i64 t2 = tcg_temp_new_i64();
539 TCGv_i64 t3 = tcg_temp_new_i64();
541 tcg_gen_extu_i32_i64(t1, r1);
542 tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
543 tcg_gen_extu_i32_i64(t3, r3);
545 tcg_gen_mul_i64(t1, t1, t3);
546 tcg_gen_sub_i64(t3, t2, t1);
547 tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
548 /* calc V bit, only the sub can overflow, if t1 > t2 */
549 tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
550 tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
551 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
552 /* Calc SV bit */
553 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
554 /* Calc AV/SAV bits */
555 tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
556 tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
557 /* calc SAV */
558 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
560 tcg_temp_free_i64(t1);
561 tcg_temp_free_i64(t2);
562 tcg_temp_free_i64(t3);
565 static inline void
566 gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
567 int32_t con)
569 TCGv temp = tcg_const_i32(con);
570 gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
571 tcg_temp_free(temp);
574 static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
576 TCGv temp = tcg_const_i32(r2);
577 gen_add_d(ret, r1, temp);
578 tcg_temp_free(temp);
580 /* calculate the carry bit too */
581 static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
583 TCGv t0 = tcg_temp_new_i32();
584 TCGv result = tcg_temp_new_i32();
586 tcg_gen_movi_tl(t0, 0);
587 /* Addition and set C/V/SV bits */
588 tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
589 /* calc V bit */
590 tcg_gen_xor_tl(cpu_PSW_V, result, r1);
591 tcg_gen_xor_tl(t0, r1, r2);
592 tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
593 /* Calc SV bit */
594 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
595 /* Calc AV/SAV bits */
596 tcg_gen_add_tl(cpu_PSW_AV, result, result);
597 tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
598 /* calc SAV */
599 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
600 /* write back result */
601 tcg_gen_mov_tl(ret, result);
603 tcg_temp_free(result);
604 tcg_temp_free(t0);
607 static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
609 TCGv temp = tcg_const_i32(con);
610 gen_add_CC(ret, r1, temp);
611 tcg_temp_free(temp);
614 static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
616 TCGv carry = tcg_temp_new_i32();
617 TCGv t0 = tcg_temp_new_i32();
618 TCGv result = tcg_temp_new_i32();
620 tcg_gen_movi_tl(t0, 0);
621 tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
622 /* Addition, carry and set C/V/SV bits */
623 tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
624 tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
625 /* calc V bit */
626 tcg_gen_xor_tl(cpu_PSW_V, result, r1);
627 tcg_gen_xor_tl(t0, r1, r2);
628 tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
629 /* Calc SV bit */
630 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
631 /* Calc AV/SAV bits */
632 tcg_gen_add_tl(cpu_PSW_AV, result, result);
633 tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
634 /* calc SAV */
635 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
636 /* write back result */
637 tcg_gen_mov_tl(ret, result);
639 tcg_temp_free(result);
640 tcg_temp_free(t0);
641 tcg_temp_free(carry);
644 static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
646 TCGv temp = tcg_const_i32(con);
647 gen_addc_CC(ret, r1, temp);
648 tcg_temp_free(temp);
651 static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
652 TCGv r4)
654 TCGv temp = tcg_temp_new();
655 TCGv temp2 = tcg_temp_new();
656 TCGv result = tcg_temp_new();
657 TCGv mask = tcg_temp_new();
658 TCGv t0 = tcg_const_i32(0);
660 /* create mask for sticky bits */
661 tcg_gen_setcond_tl(cond, mask, r4, t0);
662 tcg_gen_shli_tl(mask, mask, 31);
664 tcg_gen_add_tl(result, r1, r2);
665 /* Calc PSW_V */
666 tcg_gen_xor_tl(temp, result, r1);
667 tcg_gen_xor_tl(temp2, r1, r2);
668 tcg_gen_andc_tl(temp, temp, temp2);
669 tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
670 /* Set PSW_SV */
671 tcg_gen_and_tl(temp, temp, mask);
672 tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
673 /* calc AV bit */
674 tcg_gen_add_tl(temp, result, result);
675 tcg_gen_xor_tl(temp, temp, result);
676 tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
677 /* calc SAV bit */
678 tcg_gen_and_tl(temp, temp, mask);
679 tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
680 /* write back result */
681 tcg_gen_movcond_tl(cond, r3, r4, t0, result, r3);
683 tcg_temp_free(t0);
684 tcg_temp_free(temp);
685 tcg_temp_free(temp2);
686 tcg_temp_free(result);
687 tcg_temp_free(mask);
690 static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
691 TCGv r3, TCGv r4)
693 TCGv temp = tcg_const_i32(r2);
694 gen_cond_add(cond, r1, temp, r3, r4);
695 tcg_temp_free(temp);
698 static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
700 TCGv temp = tcg_temp_new_i32();
701 TCGv result = tcg_temp_new_i32();
703 tcg_gen_sub_tl(result, r1, r2);
704 /* calc V bit */
705 tcg_gen_xor_tl(cpu_PSW_V, result, r1);
706 tcg_gen_xor_tl(temp, r1, r2);
707 tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
708 /* calc SV bit */
709 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
710 /* Calc AV bit */
711 tcg_gen_add_tl(cpu_PSW_AV, result, result);
712 tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
713 /* calc SAV bit */
714 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
715 /* write back result */
716 tcg_gen_mov_tl(ret, result);
718 tcg_temp_free(temp);
719 tcg_temp_free(result);
722 static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
724 TCGv temp = tcg_temp_new_i32();
725 TCGv result = tcg_temp_new_i32();
727 tcg_gen_sub_tl(result, r1, r2);
728 tcg_gen_sub_tl(temp, r2, r1);
729 tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);
731 /* calc V bit */
732 tcg_gen_xor_tl(cpu_PSW_V, result, r1);
733 tcg_gen_xor_tl(temp, result, r2);
734 tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
735 tcg_gen_xor_tl(temp, r1, r2);
736 tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
737 /* calc SV bit */
738 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
739 /* Calc AV bit */
740 tcg_gen_add_tl(cpu_PSW_AV, result, result);
741 tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
742 /* calc SAV bit */
743 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
744 /* write back result */
745 tcg_gen_mov_tl(ret, result);
747 tcg_temp_free(temp);
748 tcg_temp_free(result);
751 static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
753 TCGv temp = tcg_const_i32(con);
754 gen_absdif(ret, r1, temp);
755 tcg_temp_free(temp);
758 static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
760 TCGv temp = tcg_const_i32(con);
761 gen_helper_absdif_ssov(ret, cpu_env, r1, temp);
762 tcg_temp_free(temp);
765 static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
767 TCGv high = tcg_temp_new();
768 TCGv low = tcg_temp_new();
770 tcg_gen_muls2_tl(low, high, r1, r2);
771 tcg_gen_mov_tl(ret, low);
772 /* calc V bit */
773 tcg_gen_sari_tl(low, low, 31);
774 tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
775 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
776 /* calc SV bit */
777 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
778 /* Calc AV bit */
779 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
780 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
781 /* calc SAV bit */
782 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
784 tcg_temp_free(high);
785 tcg_temp_free(low);
788 static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
790 TCGv temp = tcg_const_i32(con);
791 gen_mul_i32s(ret, r1, temp);
792 tcg_temp_free(temp);
795 static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
797 tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
798 /* clear V bit */
799 tcg_gen_movi_tl(cpu_PSW_V, 0);
800 /* calc SV bit */
801 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
802 /* Calc AV bit */
803 tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
804 tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
805 /* calc SAV bit */
806 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
809 static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
810 int32_t con)
812 TCGv temp = tcg_const_i32(con);
813 gen_mul_i64s(ret_low, ret_high, r1, temp);
814 tcg_temp_free(temp);
817 static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
819 tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
820 /* clear V bit */
821 tcg_gen_movi_tl(cpu_PSW_V, 0);
822 /* calc SV bit */
823 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
824 /* Calc AV bit */
825 tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
826 tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
827 /* calc SAV bit */
828 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
831 static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
832 int32_t con)
834 TCGv temp = tcg_const_i32(con);
835 gen_mul_i64u(ret_low, ret_high, r1, temp);
836 tcg_temp_free(temp);
839 static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
841 TCGv temp = tcg_const_i32(con);
842 gen_helper_mul_ssov(ret, cpu_env, r1, temp);
843 tcg_temp_free(temp);
846 static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
848 TCGv temp = tcg_const_i32(con);
849 gen_helper_mul_suov(ret, cpu_env, r1, temp);
850 tcg_temp_free(temp);
852 /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
853 static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
855 TCGv temp = tcg_const_i32(con);
856 gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp);
857 tcg_temp_free(temp);
860 static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
862 TCGv temp = tcg_const_i32(con);
863 gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp);
864 tcg_temp_free(temp);
867 static inline void
868 gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
869 int32_t con)
871 TCGv temp = tcg_const_i32(con);
872 TCGv_i64 temp64 = tcg_temp_new_i64();
873 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
874 gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, temp);
875 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
876 tcg_temp_free(temp);
877 tcg_temp_free_i64(temp64);
880 static inline void
881 gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
882 int32_t con)
884 TCGv temp = tcg_const_i32(con);
885 TCGv_i64 temp64 = tcg_temp_new_i64();
886 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
887 gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, temp);
888 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
889 tcg_temp_free(temp);
890 tcg_temp_free_i64(temp64);
893 static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
895 TCGv temp = tcg_const_i32(con);
896 gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
897 tcg_temp_free(temp);
900 static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
902 TCGv temp = tcg_const_i32(con);
903 gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
904 tcg_temp_free(temp);
907 static inline void
908 gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
909 int32_t con)
911 TCGv temp = tcg_const_i32(con);
912 TCGv_i64 temp64 = tcg_temp_new_i64();
913 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
914 gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, temp);
915 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
916 tcg_temp_free(temp);
917 tcg_temp_free_i64(temp64);
920 static inline void
921 gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
922 int32_t con)
924 TCGv temp = tcg_const_i32(con);
925 TCGv_i64 temp64 = tcg_temp_new_i64();
926 tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
927 gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, temp);
928 tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
929 tcg_temp_free(temp);
930 tcg_temp_free_i64(temp64);
933 static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
935 TCGv sat_neg = tcg_const_i32(low);
936 TCGv temp = tcg_const_i32(up);
938 /* sat_neg = (arg < low ) ? low : arg; */
939 tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);
941 /* ret = (sat_neg > up ) ? up : sat_neg; */
942 tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);
944 tcg_temp_free(sat_neg);
945 tcg_temp_free(temp);
948 static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
950 TCGv temp = tcg_const_i32(up);
951 /* sat_neg = (arg > up ) ? up : arg; */
952 tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
953 tcg_temp_free(temp);
956 static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
958 if (shift_count == -32) {
959 tcg_gen_movi_tl(ret, 0);
960 } else if (shift_count >= 0) {
961 tcg_gen_shli_tl(ret, r1, shift_count);
962 } else {
963 tcg_gen_shri_tl(ret, r1, -shift_count);
967 static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
969 TCGv temp_low, temp_high;
971 if (shiftcount == -16) {
972 tcg_gen_movi_tl(ret, 0);
973 } else {
974 temp_high = tcg_temp_new();
975 temp_low = tcg_temp_new();
977 tcg_gen_andi_tl(temp_low, r1, 0xffff);
978 tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
979 gen_shi(temp_low, temp_low, shiftcount);
980 gen_shi(ret, temp_high, shiftcount);
981 tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);
983 tcg_temp_free(temp_low);
984 tcg_temp_free(temp_high);
988 static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
990 uint32_t msk, msk_start;
991 TCGv temp = tcg_temp_new();
992 TCGv temp2 = tcg_temp_new();
993 TCGv t_0 = tcg_const_i32(0);
995 if (shift_count == 0) {
996 /* Clear PSW.C and PSW.V */
997 tcg_gen_movi_tl(cpu_PSW_C, 0);
998 tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
999 tcg_gen_mov_tl(ret, r1);
1000 } else if (shift_count == -32) {
1001 /* set PSW.C */
1002 tcg_gen_mov_tl(cpu_PSW_C, r1);
1003         /* fill ret completely with sign bit */
1004 tcg_gen_sari_tl(ret, r1, 31);
1005 /* clear PSW.V */
1006 tcg_gen_movi_tl(cpu_PSW_V, 0);
1007 } else if (shift_count > 0) {
1008 TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
1009 TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);
1011 /* calc carry */
1012 msk_start = 32 - shift_count;
1013 msk = ((1 << shift_count) - 1) << msk_start;
1014 tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
1015 /* calc v/sv bits */
1016 tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
1017 tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
1018 tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
1019 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
1020 /* calc sv */
1021 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
1022 /* do shift */
1023 tcg_gen_shli_tl(ret, r1, shift_count);
1025 tcg_temp_free(t_max);
1026 tcg_temp_free(t_min);
1027 } else {
1028 /* clear PSW.V */
1029 tcg_gen_movi_tl(cpu_PSW_V, 0);
1030 /* calc carry */
1031 msk = (1 << -shift_count) - 1;
1032 tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
1033 /* do shift */
1034 tcg_gen_sari_tl(ret, r1, -shift_count);
1036 /* calc av overflow bit */
1037 tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
1038 tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
1039 /* calc sav overflow bit */
1040 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
1042 tcg_temp_free(temp);
1043 tcg_temp_free(temp2);
1044 tcg_temp_free(t_0);
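/* Worked example (illustrative): for shift_count = 4 the code above uses
   t_max = 0x07ffffff and t_min = 0xf8000000, sets PSW_C from the four bits that
   get shifted out (r1 & 0xf0000000), flags V when r1 lies outside
   [-0x08000000, 0x07ffffff], and finally shifts left by four. */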
1047 static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
1049 gen_helper_sha_ssov(ret, cpu_env, r1, r2);
1052 static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
1054 TCGv temp = tcg_const_i32(con);
1055 gen_shas(ret, r1, temp);
1056 tcg_temp_free(temp);
1059 static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
1061 TCGv low, high;
1063 if (shift_count == 0) {
1064 tcg_gen_mov_tl(ret, r1);
1065 } else if (shift_count > 0) {
1066 low = tcg_temp_new();
1067 high = tcg_temp_new();
1069 tcg_gen_andi_tl(high, r1, 0xffff0000);
1070 tcg_gen_shli_tl(low, r1, shift_count);
1071 tcg_gen_shli_tl(ret, high, shift_count);
1072 tcg_gen_deposit_tl(ret, ret, low, 0, 16);
1074 tcg_temp_free(low);
1075 tcg_temp_free(high);
1076 } else {
1077 low = tcg_temp_new();
1078 high = tcg_temp_new();
1080 tcg_gen_ext16s_tl(low, r1);
1081 tcg_gen_sari_tl(low, low, -shift_count);
1082 tcg_gen_sari_tl(ret, r1, -shift_count);
1083 tcg_gen_deposit_tl(ret, ret, low, 0, 16);
1085 tcg_temp_free(low);
1086 tcg_temp_free(high);
1091 /* ret = {ret[30:0], (r1 cond r2)}; */
1092 static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
1094 TCGv temp = tcg_temp_new();
1095 TCGv temp2 = tcg_temp_new();
1097 tcg_gen_shli_tl(temp, ret, 1);
1098 tcg_gen_setcond_tl(cond, temp2, r1, r2);
1099 tcg_gen_or_tl(ret, temp, temp2);
1101 tcg_temp_free(temp);
1102 tcg_temp_free(temp2);
1105 static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
1107 TCGv temp = tcg_const_i32(con);
1108 gen_sh_cond(cond, ret, r1, temp);
1109 tcg_temp_free(temp);
1112 static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
1114 gen_helper_add_ssov(ret, cpu_env, r1, r2);
1117 static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
1119 TCGv temp = tcg_const_i32(con);
1120 gen_helper_add_ssov(ret, cpu_env, r1, temp);
1121 tcg_temp_free(temp);
1124 static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
1126 TCGv temp = tcg_const_i32(con);
1127 gen_helper_add_suov(ret, cpu_env, r1, temp);
1128 tcg_temp_free(temp);
1131 static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
1133 gen_helper_sub_ssov(ret, cpu_env, r1, r2);
1136 static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
1138 gen_helper_sub_suov(ret, cpu_env, r1, r2);
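/* ret[0] = ret[0] op2 (r1[pos1] op1 r2[pos2]); the upper 31 bits of ret are
   left unchanged. This is the building block for the combined bit-logic
   instructions (e.g. the AND.AND.T family). */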
1141 static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
1142 int pos1, int pos2,
1143 void(*op1)(TCGv, TCGv, TCGv),
1144 void(*op2)(TCGv, TCGv, TCGv))
1146 TCGv temp1, temp2;
1148 temp1 = tcg_temp_new();
1149 temp2 = tcg_temp_new();
1151 tcg_gen_shri_tl(temp2, r2, pos2);
1152 tcg_gen_shri_tl(temp1, r1, pos1);
1154 (*op1)(temp1, temp1, temp2);
1155 (*op2)(temp1 , ret, temp1);
1157 tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);
1159 tcg_temp_free(temp1);
1160 tcg_temp_free(temp2);
1163 /* ret = r1[pos1] op1 r2[pos2]; */
1164 static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
1165 int pos1, int pos2,
1166 void(*op1)(TCGv, TCGv, TCGv))
1168 TCGv temp1, temp2;
1170 temp1 = tcg_temp_new();
1171 temp2 = tcg_temp_new();
1173 tcg_gen_shri_tl(temp2, r2, pos2);
1174 tcg_gen_shri_tl(temp1, r1, pos1);
1176 (*op1)(ret, temp1, temp2);
1178 tcg_gen_andi_tl(ret, ret, 0x1);
1180 tcg_temp_free(temp1);
1181 tcg_temp_free(temp2);
1184 static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
1185 void(*op)(TCGv, TCGv, TCGv))
1187 TCGv temp = tcg_temp_new();
1188 TCGv temp2 = tcg_temp_new();
1189 /* temp = (arg1 cond arg2 )*/
1190 tcg_gen_setcond_tl(cond, temp, r1, r2);
1191 /* temp2 = ret[0]*/
1192 tcg_gen_andi_tl(temp2, ret, 0x1);
1193 /* temp = temp insn temp2 */
1194 (*op)(temp, temp, temp2);
1195 /* ret = {ret[31:1], temp} */
1196 tcg_gen_deposit_tl(ret, ret, temp, 0, 1);
1198 tcg_temp_free(temp);
1199 tcg_temp_free(temp2);
1202 static inline void
1203 gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
1204 void(*op)(TCGv, TCGv, TCGv))
1206 TCGv temp = tcg_const_i32(con);
1207 gen_accumulating_cond(cond, ret, r1, temp, op);
1208 tcg_temp_free(temp);
1211 static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
1213 TCGv b0 = tcg_temp_new();
1214 TCGv b1 = tcg_temp_new();
1215 TCGv b2 = tcg_temp_new();
1216 TCGv b3 = tcg_temp_new();
1218 /* byte 0 */
1219 tcg_gen_andi_tl(b0, r1, 0xff);
1220 tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);
1222 /* byte 1 */
1223 tcg_gen_andi_tl(b1, r1, 0xff00);
1224 tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);
1226 /* byte 2 */
1227 tcg_gen_andi_tl(b2, r1, 0xff0000);
1228 tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);
1230 /* byte 3 */
1231 tcg_gen_andi_tl(b3, r1, 0xff000000);
1232 tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);
1234 /* combine them */
1235 tcg_gen_or_tl(ret, b0, b1);
1236 tcg_gen_or_tl(ret, ret, b2);
1237 tcg_gen_or_tl(ret, ret, b3);
1239 tcg_temp_free(b0);
1240 tcg_temp_free(b1);
1241 tcg_temp_free(b2);
1242 tcg_temp_free(b3);
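/* Worked example (illustrative values): for r1 = 0x12345678 and
   con = 0x00005600 only byte 1 matches (0x56), so ret becomes 1; if no byte of
   r1 equals the corresponding byte of con, ret is 0. */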
1245 static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
1247 TCGv h0 = tcg_temp_new();
1248 TCGv h1 = tcg_temp_new();
1250 /* halfword 0 */
1251 tcg_gen_andi_tl(h0, r1, 0xffff);
1252 tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);
1254 /* halfword 1 */
1255 tcg_gen_andi_tl(h1, r1, 0xffff0000);
1256 tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);
1258 /* combine them */
1259 tcg_gen_or_tl(ret, h0, h1);
1261 tcg_temp_free(h0);
1262 tcg_temp_free(h1);
1264 /* mask = ((1 << width) - 1) << pos;
1265    ret = (r1 & ~mask) | ((r2 << pos) & mask); */
1266 static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
1268 TCGv mask = tcg_temp_new();
1269 TCGv temp = tcg_temp_new();
1270 TCGv temp2 = tcg_temp_new();
1272 tcg_gen_movi_tl(mask, 1);
1273 tcg_gen_shl_tl(mask, mask, width);
1274 tcg_gen_subi_tl(mask, mask, 1);
1275 tcg_gen_shl_tl(mask, mask, pos);
1277 tcg_gen_shl_tl(temp, r2, pos);
1278 tcg_gen_and_tl(temp, temp, mask);
1279 tcg_gen_andc_tl(temp2, r1, mask);
1280 tcg_gen_or_tl(ret, temp, temp2);
1282 tcg_temp_free(mask);
1283 tcg_temp_free(temp);
1284 tcg_temp_free(temp2);
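/* Worked example (illustrative values): width = 8 and pos = 4 give
   mask = 0xff0, so ret = (r1 & ~0xff0) | ((r2 << 4) & 0xff0), i.e. bits 11..4
   of r1 are replaced by the low byte of r2. */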
1287 /* helpers for generating program flow micro-ops */
1289 static inline void gen_save_pc(target_ulong pc)
1291 tcg_gen_movi_tl(cpu_PC, pc);
1294 static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
1296 TranslationBlock *tb;
1297 tb = ctx->tb;
1298 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
1299 likely(!ctx->singlestep_enabled)) {
1300 tcg_gen_goto_tb(n);
1301 gen_save_pc(dest);
1302 tcg_gen_exit_tb((uintptr_t)tb + n);
1303 } else {
1304 gen_save_pc(dest);
1305 if (ctx->singlestep_enabled) {
1306 /* raise exception debug */
1308 tcg_gen_exit_tb(0);
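/* Reading aid: if the destination stays on the same guest page as the current
   TB and we are not single-stepping, the exit is emitted with
   tcg_gen_goto_tb()/tcg_gen_exit_tb((uintptr_t)tb + n) so translation blocks
   can be chained directly; otherwise the PC is simply written back and we
   return to the main loop with tcg_gen_exit_tb(0). */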
1312 static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
1313 TCGv r2, int16_t address)
1315 int jumpLabel;
1316 jumpLabel = gen_new_label();
1317 tcg_gen_brcond_tl(cond, r1, r2, jumpLabel);
1319 gen_goto_tb(ctx, 1, ctx->next_pc);
1321 gen_set_label(jumpLabel);
1322 gen_goto_tb(ctx, 0, ctx->pc + address * 2);
1325 static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
1326 int r2, int16_t address)
1328 TCGv temp = tcg_const_i32(r2);
1329 gen_branch_cond(ctx, cond, r1, temp, address);
1330 tcg_temp_free(temp);
1333 static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
1335 int l1;
1336 l1 = gen_new_label();
1338 tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
1339 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
1340 gen_goto_tb(ctx, 1, ctx->pc + offset);
1341 gen_set_label(l1);
1342 gen_goto_tb(ctx, 0, ctx->next_pc);
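/* Reading aid: LOOP decrements A[r1] and branches back by 'offset' as long as
   the result is not -1; the brcond above therefore takes the fall-through exit
   to next_pc only once the decremented register reaches -1. */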
1345 static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
1346 int r2 , int32_t constant , int32_t offset)
1348 TCGv temp, temp2;
1349 int n;
1351 switch (opc) {
1352 /* SB-format jumps */
1353 case OPC1_16_SB_J:
1354 case OPC1_32_B_J:
1355 gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
1356 break;
1357 case OPC1_32_B_CALL:
1358 case OPC1_16_SB_CALL:
1359 gen_helper_1arg(call, ctx->next_pc);
1360 gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
1361 break;
1362 case OPC1_16_SB_JZ:
1363 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], 0, offset);
1364 break;
1365 case OPC1_16_SB_JNZ:
1366 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], 0, offset);
1367 break;
1368 /* SBC-format jumps */
1369 case OPC1_16_SBC_JEQ:
1370 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant, offset);
1371 break;
1372 case OPC1_16_SBC_JNE:
1373 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], constant, offset);
1374 break;
1375 /* SBRN-format jumps */
1376 case OPC1_16_SBRN_JZ_T:
1377 temp = tcg_temp_new();
1378 tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
1379 gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
1380 tcg_temp_free(temp);
1381 break;
1382 case OPC1_16_SBRN_JNZ_T:
1383 temp = tcg_temp_new();
1384 tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
1385 gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
1386 tcg_temp_free(temp);
1387 break;
1388 /* SBR-format jumps */
1389 case OPC1_16_SBR_JEQ:
1390 gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15],
1391 offset);
1392 break;
1393 case OPC1_16_SBR_JNE:
1394 gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15],
1395 offset);
1396 break;
1397 case OPC1_16_SBR_JNZ:
1398 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], 0, offset);
1399 break;
1400 case OPC1_16_SBR_JNZ_A:
1401 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset);
1402 break;
1403 case OPC1_16_SBR_JGEZ:
1404 gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], 0, offset);
1405 break;
1406 case OPC1_16_SBR_JGTZ:
1407 gen_branch_condi(ctx, TCG_COND_GT, cpu_gpr_d[r1], 0, offset);
1408 break;
1409 case OPC1_16_SBR_JLEZ:
1410 gen_branch_condi(ctx, TCG_COND_LE, cpu_gpr_d[r1], 0, offset);
1411 break;
1412 case OPC1_16_SBR_JLTZ:
1413 gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], 0, offset);
1414 break;
1415 case OPC1_16_SBR_JZ:
1416 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], 0, offset);
1417 break;
1418 case OPC1_16_SBR_JZ_A:
1419 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset);
1420 break;
1421 case OPC1_16_SBR_LOOP:
1422 gen_loop(ctx, r1, offset * 2 - 32);
1423 break;
1424 /* SR-format jumps */
1425 case OPC1_16_SR_JI:
1426 tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
1427 tcg_gen_exit_tb(0);
1428 break;
1429 case OPC2_16_SR_RET:
1430 gen_helper_ret(cpu_env);
1431 tcg_gen_exit_tb(0);
1432 break;
1433 /* B-format */
1434 case OPC1_32_B_CALLA:
1435 gen_helper_1arg(call, ctx->next_pc);
1436 gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
1437 break;
1438 case OPC1_32_B_JLA:
1439 tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
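        /* fall through - JLA only differs from JA by saving the return address */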
1440 case OPC1_32_B_JA:
1441 gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
1442 break;
1443 case OPC1_32_B_JL:
1444 tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
1445 gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
1446 break;
1447 /* BOL format */
1448 case OPCM_32_BRC_EQ_NEQ:
1449 if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JEQ) {
1450 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], constant, offset);
1451 } else {
1452 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], constant, offset);
1454 break;
1455 case OPCM_32_BRC_GE:
1456 if (MASK_OP_BRC_OP2(ctx->opcode) == OP2_32_BRC_JGE) {
1457 gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], constant, offset);
1458 } else {
1459 constant = MASK_OP_BRC_CONST4(ctx->opcode);
1460 gen_branch_condi(ctx, TCG_COND_GEU, cpu_gpr_d[r1], constant,
1461 offset);
1463 break;
1464 case OPCM_32_BRC_JLT:
1465 if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JLT) {
1466 gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], constant, offset);
1467 } else {
1468 constant = MASK_OP_BRC_CONST4(ctx->opcode);
1469 gen_branch_condi(ctx, TCG_COND_LTU, cpu_gpr_d[r1], constant,
1470 offset);
1472 break;
1473 case OPCM_32_BRC_JNE:
1474 temp = tcg_temp_new();
1475 if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) {
1476 tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
1477 /* subi is unconditional */
1478 tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
1479 gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
1480 } else {
1481 tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
1482 /* addi is unconditional */
1483 tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
1484 gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
1486 tcg_temp_free(temp);
1487 break;
1488 /* BRN format */
1489 case OPCM_32_BRN_JTT:
1490 n = MASK_OP_BRN_N(ctx->opcode);
1492 temp = tcg_temp_new();
1493 tcg_gen_andi_tl(temp, cpu_gpr_d[r1], (1 << n));
1495 if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) {
1496 gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
1497 } else {
1498 gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
1500 tcg_temp_free(temp);
1501 break;
1502 /* BRR Format */
1503 case OPCM_32_BRR_EQ_NEQ:
1504 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ) {
1505 gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2],
1506 offset);
1507 } else {
1508 gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
1509 offset);
1511 break;
1512 case OPCM_32_BRR_ADDR_EQ_NEQ:
1513 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ_A) {
1514 gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_a[r1], cpu_gpr_a[r2],
1515 offset);
1516 } else {
1517 gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_a[r1], cpu_gpr_a[r2],
1518 offset);
1520 break;
1521 case OPCM_32_BRR_GE:
1522 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JGE) {
1523 gen_branch_cond(ctx, TCG_COND_GE, cpu_gpr_d[r1], cpu_gpr_d[r2],
1524 offset);
1525 } else {
1526 gen_branch_cond(ctx, TCG_COND_GEU, cpu_gpr_d[r1], cpu_gpr_d[r2],
1527 offset);
1529 break;
1530 case OPCM_32_BRR_JLT:
1531 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JLT) {
1532 gen_branch_cond(ctx, TCG_COND_LT, cpu_gpr_d[r1], cpu_gpr_d[r2],
1533 offset);
1534 } else {
1535 gen_branch_cond(ctx, TCG_COND_LTU, cpu_gpr_d[r1], cpu_gpr_d[r2],
1536 offset);
1538 break;
1539 case OPCM_32_BRR_LOOP:
1540 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_LOOP) {
1541 gen_loop(ctx, r1, offset * 2);
1542 } else {
1543 /* OPC2_32_BRR_LOOPU */
1544 gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
1546 break;
1547 case OPCM_32_BRR_JNE:
1548 temp = tcg_temp_new();
1549 temp2 = tcg_temp_new();
1550 if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) {
1551 tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
1552 /* also save r2, in case of r1 == r2, so r2 is not decremented */
1553 tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
1554 /* subi is unconditional */
1555 tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
1556 gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
1557 } else {
1558 tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
1559             /* also save r2, in case of r1 == r2, so r2 is not incremented */
1560 tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
1561 /* addi is unconditional */
1562 tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
1563 gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
1565 tcg_temp_free(temp);
1566 tcg_temp_free(temp2);
1567 break;
1568 case OPCM_32_BRR_JNZ:
1569 if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNZ_A) {
1570 gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset);
1571 } else {
1572 gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset);
1574 break;
1575 default:
1576 printf("Branch Error at %x\n", ctx->pc);
1578 ctx->bstate = BS_BRANCH;
1583 * Functions for decoding instructions
1586 static void decode_src_opc(DisasContext *ctx, int op1)
1588 int r1;
1589 int32_t const4;
1590 TCGv temp, temp2;
1592 r1 = MASK_OP_SRC_S1D(ctx->opcode);
1593 const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode);
1595 switch (op1) {
1596 case OPC1_16_SRC_ADD:
1597 gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
1598 break;
1599 case OPC1_16_SRC_ADD_A15:
1600 gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[15], const4);
1601 break;
1602 case OPC1_16_SRC_ADD_15A:
1603 gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4);
1604 break;
1605 case OPC1_16_SRC_ADD_A:
1606 tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4);
1607 break;
1608 case OPC1_16_SRC_CADD:
1609 gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
1610 cpu_gpr_d[15]);
1611 break;
1612 case OPC1_16_SRC_CADDN:
1613 gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
1614 cpu_gpr_d[15]);
1615 break;
1616 case OPC1_16_SRC_CMOV:
1617 temp = tcg_const_tl(0);
1618 temp2 = tcg_const_tl(const4);
1619 tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
1620 temp2, cpu_gpr_d[r1]);
1621 tcg_temp_free(temp);
1622 tcg_temp_free(temp2);
1623 break;
1624 case OPC1_16_SRC_CMOVN:
1625 temp = tcg_const_tl(0);
1626 temp2 = tcg_const_tl(const4);
1627 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
1628 temp2, cpu_gpr_d[r1]);
1629 tcg_temp_free(temp);
1630 tcg_temp_free(temp2);
1631 break;
1632 case OPC1_16_SRC_EQ:
1633 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
1634 const4);
1635 break;
1636 case OPC1_16_SRC_LT:
1637 tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
1638 const4);
1639 break;
1640 case OPC1_16_SRC_MOV:
1641 tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
1642 break;
1643 case OPC1_16_SRC_MOV_A:
1644 const4 = MASK_OP_SRC_CONST4(ctx->opcode);
1645 tcg_gen_movi_tl(cpu_gpr_a[r1], const4);
1646 break;
1647 case OPC1_16_SRC_SH:
1648 gen_shi(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
1649 break;
1650 case OPC1_16_SRC_SHA:
1651 gen_shaci(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
1652 break;
1656 static void decode_srr_opc(DisasContext *ctx, int op1)
1658 int r1, r2;
1659 TCGv temp;
1661 r1 = MASK_OP_SRR_S1D(ctx->opcode);
1662 r2 = MASK_OP_SRR_S2(ctx->opcode);
1664 switch (op1) {
1665 case OPC1_16_SRR_ADD:
1666 gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
1667 break;
1668 case OPC1_16_SRR_ADD_A15:
1669 gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
1670 break;
1671 case OPC1_16_SRR_ADD_15A:
1672 gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
1673 break;
1674 case OPC1_16_SRR_ADD_A:
1675 tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
1676 break;
1677 case OPC1_16_SRR_ADDS:
1678 gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
1679 break;
1680 case OPC1_16_SRR_AND:
1681 tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
1682 break;
1683 case OPC1_16_SRR_CMOV:
1684 temp = tcg_const_tl(0);
1685 tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
1686 cpu_gpr_d[r2], cpu_gpr_d[r1]);
1687 tcg_temp_free(temp);
1688 break;
1689 case OPC1_16_SRR_CMOVN:
1690 temp = tcg_const_tl(0);
1691 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
1692 cpu_gpr_d[r2], cpu_gpr_d[r1]);
1693 tcg_temp_free(temp);
1694 break;
1695 case OPC1_16_SRR_EQ:
1696 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
1697 cpu_gpr_d[r2]);
1698 break;
1699 case OPC1_16_SRR_LT:
1700 tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
1701 cpu_gpr_d[r2]);
1702 break;
1703 case OPC1_16_SRR_MOV:
1704 tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]);
1705 break;
1706 case OPC1_16_SRR_MOV_A:
1707 tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]);
1708 break;
1709 case OPC1_16_SRR_MOV_AA:
1710 tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]);
1711 break;
1712 case OPC1_16_SRR_MOV_D:
1713 tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]);
1714 break;
1715 case OPC1_16_SRR_MUL:
1716 gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
1717 break;
1718 case OPC1_16_SRR_OR:
1719 tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
1720 break;
1721 case OPC1_16_SRR_SUB:
1722 gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
1723 break;
1724 case OPC1_16_SRR_SUB_A15B:
1725 gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
1726 break;
1727 case OPC1_16_SRR_SUB_15AB:
1728 gen_sub_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
1729 break;
1730 case OPC1_16_SRR_SUBS:
1731 gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
1732 break;
1733 case OPC1_16_SRR_XOR:
1734 tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
1735 break;
1739 static void decode_ssr_opc(DisasContext *ctx, int op1)
1741 int r1, r2;
1743 r1 = MASK_OP_SSR_S1(ctx->opcode);
1744 r2 = MASK_OP_SSR_S2(ctx->opcode);
1746 switch (op1) {
1747 case OPC1_16_SSR_ST_A:
1748 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
1749 break;
1750 case OPC1_16_SSR_ST_A_POSTINC:
1751 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
1752 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
1753 break;
1754 case OPC1_16_SSR_ST_B:
1755 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
1756 break;
1757 case OPC1_16_SSR_ST_B_POSTINC:
1758 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
1759 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
1760 break;
1761 case OPC1_16_SSR_ST_H:
1762 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
1763 break;
1764 case OPC1_16_SSR_ST_H_POSTINC:
1765 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
1766 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
1767 break;
1768 case OPC1_16_SSR_ST_W:
1769 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
1770 break;
1771 case OPC1_16_SSR_ST_W_POSTINC:
1772 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
1773 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
1774 break;
1778 static void decode_sc_opc(DisasContext *ctx, int op1)
1780 int32_t const16;
1782 const16 = MASK_OP_SC_CONST8(ctx->opcode);
1784 switch (op1) {
1785 case OPC1_16_SC_AND:
1786 tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
1787 break;
1788 case OPC1_16_SC_BISR:
1789 gen_helper_1arg(bisr, const16 & 0xff);
1790 break;
1791 case OPC1_16_SC_LD_A:
1792 gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
1793 break;
1794 case OPC1_16_SC_LD_W:
1795 gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
1796 break;
1797 case OPC1_16_SC_MOV:
1798 tcg_gen_movi_tl(cpu_gpr_d[15], const16);
1799 break;
1800 case OPC1_16_SC_OR:
1801 tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
1802 break;
1803 case OPC1_16_SC_ST_A:
1804 gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
1805 break;
1806 case OPC1_16_SC_ST_W:
1807 gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
1808 break;
1809 case OPC1_16_SC_SUB_A:
1810 tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16);
1811 break;
1815 static void decode_slr_opc(DisasContext *ctx, int op1)
1817 int r1, r2;
1819 r1 = MASK_OP_SLR_D(ctx->opcode);
1820 r2 = MASK_OP_SLR_S2(ctx->opcode);
1822 switch (op1) {
1823 /* SLR-format */
1824 case OPC1_16_SLR_LD_A:
1825 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
1826 break;
1827 case OPC1_16_SLR_LD_A_POSTINC:
1828 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
1829 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
1830 break;
1831 case OPC1_16_SLR_LD_BU:
1832 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
1833 break;
1834 case OPC1_16_SLR_LD_BU_POSTINC:
1835 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
1836 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
1837 break;
1838 case OPC1_16_SLR_LD_H:
1839 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
1840 break;
1841 case OPC1_16_SLR_LD_H_POSTINC:
1842 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
1843 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
1844 break;
1845 case OPC1_16_SLR_LD_W:
1846         tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
1847 break;
1848 case OPC1_16_SLR_LD_W_POSTINC:
1849         tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
1850 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
1851 break;
1855 static void decode_sro_opc(DisasContext *ctx, int op1)
1857 int r2;
1858 int32_t address;
1860 r2 = MASK_OP_SRO_S2(ctx->opcode);
1861 address = MASK_OP_SRO_OFF4(ctx->opcode);
1863 /* SRO-format */
1864 switch (op1) {
1865 case OPC1_16_SRO_LD_A:
1866 gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
1867 break;
1868 case OPC1_16_SRO_LD_BU:
1869 gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
1870 break;
1871 case OPC1_16_SRO_LD_H:
1872 gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_LESW);
1873 break;
1874 case OPC1_16_SRO_LD_W:
1875 gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
1876 break;
1877 case OPC1_16_SRO_ST_A:
1878 gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
1879 break;
1880 case OPC1_16_SRO_ST_B:
1881 gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
1882 break;
1883 case OPC1_16_SRO_ST_H:
1884 gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
1885 break;
1886 case OPC1_16_SRO_ST_W:
1887 gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
1888 break;
1892 static void decode_sr_system(CPUTriCoreState *env, DisasContext *ctx)
1894 uint32_t op2;
1895 op2 = MASK_OP_SR_OP2(ctx->opcode);
1897 switch (op2) {
1898 case OPC2_16_SR_NOP:
1899 break;
1900 case OPC2_16_SR_RET:
1901 gen_compute_branch(ctx, op2, 0, 0, 0, 0);
1902 break;
1903 case OPC2_16_SR_RFE:
1904 gen_helper_rfe(cpu_env);
1905 tcg_gen_exit_tb(0);
1906 ctx->bstate = BS_BRANCH;
1907 break;
1908 case OPC2_16_SR_DEBUG:
1909 /* raise EXCP_DEBUG */
1910 break;
1914 static void decode_sr_accu(CPUTriCoreState *env, DisasContext *ctx)
1916 uint32_t op2;
1917 uint32_t r1;
1918 TCGv temp;
1920 r1 = MASK_OP_SR_S1D(ctx->opcode);
1921 op2 = MASK_OP_SR_OP2(ctx->opcode);
1923 switch (op2) {
1924 case OPC2_16_SR_RSUB:
1925 /* overflow only if r1 = -0x80000000 */
1926 temp = tcg_const_i32(-0x80000000);
1927 /* calc V bit */
1928 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp);
1929 tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
1930 /* calc SV bit */
1931 tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
1932 /* sub */
1933 tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
1934 /* calc av */
1935 tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
1936 tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
1937 /* calc sav */
1938 tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
1939 tcg_temp_free(temp);
1940 break;
1941 case OPC2_16_SR_SAT_B:
1942 gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80);
1943 break;
1944 case OPC2_16_SR_SAT_BU:
1945 gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xff);
1946 break;
1947 case OPC2_16_SR_SAT_H:
1948 gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7fff, -0x8000);
1949 break;
1950 case OPC2_16_SR_SAT_HU:
1951 gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xffff);
1952 break;
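/* Top level decoder for the 16 bit instruction set; dispatches on the
   major opcode to the per-format decoders above */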
1956 static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
1958 int op1;
1959 int r1, r2;
1960 int32_t const16;
1961 int32_t address;
1962 TCGv temp;
1964 op1 = MASK_OP_MAJOR(ctx->opcode);
1966 /* handle the ADDSC.A opcode, which is only 6 bits long */
1967 if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) {
1968 op1 = OPC1_16_SRRS_ADDSC_A;
1971 switch (op1) {
1972 case OPC1_16_SRC_ADD:
1973 case OPC1_16_SRC_ADD_A15:
1974 case OPC1_16_SRC_ADD_15A:
1975 case OPC1_16_SRC_ADD_A:
1976 case OPC1_16_SRC_CADD:
1977 case OPC1_16_SRC_CADDN:
1978 case OPC1_16_SRC_CMOV:
1979 case OPC1_16_SRC_CMOVN:
1980 case OPC1_16_SRC_EQ:
1981 case OPC1_16_SRC_LT:
1982 case OPC1_16_SRC_MOV:
1983 case OPC1_16_SRC_MOV_A:
1984 case OPC1_16_SRC_SH:
1985 case OPC1_16_SRC_SHA:
1986 decode_src_opc(ctx, op1);
1987 break;
1988 /* SRR-format */
1989 case OPC1_16_SRR_ADD:
1990 case OPC1_16_SRR_ADD_A15:
1991 case OPC1_16_SRR_ADD_15A:
1992 case OPC1_16_SRR_ADD_A:
1993 case OPC1_16_SRR_ADDS:
1994 case OPC1_16_SRR_AND:
1995 case OPC1_16_SRR_CMOV:
1996 case OPC1_16_SRR_CMOVN:
1997 case OPC1_16_SRR_EQ:
1998 case OPC1_16_SRR_LT:
1999 case OPC1_16_SRR_MOV:
2000 case OPC1_16_SRR_MOV_A:
2001 case OPC1_16_SRR_MOV_AA:
2002 case OPC1_16_SRR_MOV_D:
2003 case OPC1_16_SRR_MUL:
2004 case OPC1_16_SRR_OR:
2005 case OPC1_16_SRR_SUB:
2006 case OPC1_16_SRR_SUB_A15B:
2007 case OPC1_16_SRR_SUB_15AB:
2008 case OPC1_16_SRR_SUBS:
2009 case OPC1_16_SRR_XOR:
2010 decode_srr_opc(ctx, op1);
2011 break;
2012 /* SSR-format */
2013 case OPC1_16_SSR_ST_A:
2014 case OPC1_16_SSR_ST_A_POSTINC:
2015 case OPC1_16_SSR_ST_B:
2016 case OPC1_16_SSR_ST_B_POSTINC:
2017 case OPC1_16_SSR_ST_H:
2018 case OPC1_16_SSR_ST_H_POSTINC:
2019 case OPC1_16_SSR_ST_W:
2020 case OPC1_16_SSR_ST_W_POSTINC:
2021 decode_ssr_opc(ctx, op1);
2022 break;
2023 /* SRRS-format */
2024 case OPC1_16_SRRS_ADDSC_A:
2025 r2 = MASK_OP_SRRS_S2(ctx->opcode);
2026 r1 = MASK_OP_SRRS_S1D(ctx->opcode);
2027 const16 = MASK_OP_SRRS_N(ctx->opcode);
2028 temp = tcg_temp_new();
2029 tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16);
2030 tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
2031 tcg_temp_free(temp);
2032 break;
2033 /* SLRO-format */
2034 case OPC1_16_SLRO_LD_A:
2035 r1 = MASK_OP_SLRO_D(ctx->opcode);
2036 const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
2037 gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
2038 break;
2039 case OPC1_16_SLRO_LD_BU:
2040 r1 = MASK_OP_SLRO_D(ctx->opcode);
2041 const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
2042 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
2043 break;
2044 case OPC1_16_SLRO_LD_H:
2045 r1 = MASK_OP_SLRO_D(ctx->opcode);
2046 const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
2047 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
2048 break;
2049 case OPC1_16_SLRO_LD_W:
2050 r1 = MASK_OP_SLRO_D(ctx->opcode);
2051 const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
2052 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
2053 break;
2054 /* SB-format */
2055 case OPC1_16_SB_CALL:
2056 case OPC1_16_SB_J:
2057 case OPC1_16_SB_JNZ:
2058 case OPC1_16_SB_JZ:
2059 address = MASK_OP_SB_DISP8_SEXT(ctx->opcode);
2060 gen_compute_branch(ctx, op1, 0, 0, 0, address);
2061 break;
2062 /* SBC-format */
2063 case OPC1_16_SBC_JEQ:
2064 case OPC1_16_SBC_JNE:
2065 address = MASK_OP_SBC_DISP4(ctx->opcode);
2066 const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode);
2067 gen_compute_branch(ctx, op1, 0, 0, const16, address);
2068 break;
2069 /* SBRN-format */
2070 case OPC1_16_SBRN_JNZ_T:
2071 case OPC1_16_SBRN_JZ_T:
2072 address = MASK_OP_SBRN_DISP4(ctx->opcode);
2073 const16 = MASK_OP_SBRN_N(ctx->opcode);
2074 gen_compute_branch(ctx, op1, 0, 0, const16, address);
2075 break;
2076 /* SBR-format */
2077 case OPC1_16_SBR_JEQ:
2078 case OPC1_16_SBR_JGEZ:
2079 case OPC1_16_SBR_JGTZ:
2080 case OPC1_16_SBR_JLEZ:
2081 case OPC1_16_SBR_JLTZ:
2082 case OPC1_16_SBR_JNE:
2083 case OPC1_16_SBR_JNZ:
2084 case OPC1_16_SBR_JNZ_A:
2085 case OPC1_16_SBR_JZ:
2086 case OPC1_16_SBR_JZ_A:
2087 case OPC1_16_SBR_LOOP:
2088 r1 = MASK_OP_SBR_S2(ctx->opcode);
2089 address = MASK_OP_SBR_DISP4(ctx->opcode);
2090 gen_compute_branch(ctx, op1, r1, 0, 0, address);
2091 break;
2092 /* SC-format */
2093 case OPC1_16_SC_AND:
2094 case OPC1_16_SC_BISR:
2095 case OPC1_16_SC_LD_A:
2096 case OPC1_16_SC_LD_W:
2097 case OPC1_16_SC_MOV:
2098 case OPC1_16_SC_OR:
2099 case OPC1_16_SC_ST_A:
2100 case OPC1_16_SC_ST_W:
2101 case OPC1_16_SC_SUB_A:
2102 decode_sc_opc(ctx, op1);
2103 break;
2104 /* SLR-format */
2105 case OPC1_16_SLR_LD_A:
2106 case OPC1_16_SLR_LD_A_POSTINC:
2107 case OPC1_16_SLR_LD_BU:
2108 case OPC1_16_SLR_LD_BU_POSTINC:
2109 case OPC1_16_SLR_LD_H:
2110 case OPC1_16_SLR_LD_H_POSTINC:
2111 case OPC1_16_SLR_LD_W:
2112 case OPC1_16_SLR_LD_W_POSTINC:
2113 decode_slr_opc(ctx, op1);
2114 break;
2115 /* SRO-format */
2116 case OPC1_16_SRO_LD_A:
2117 case OPC1_16_SRO_LD_BU:
2118 case OPC1_16_SRO_LD_H:
2119 case OPC1_16_SRO_LD_W:
2120 case OPC1_16_SRO_ST_A:
2121 case OPC1_16_SRO_ST_B:
2122 case OPC1_16_SRO_ST_H:
2123 case OPC1_16_SRO_ST_W:
2124 decode_sro_opc(ctx, op1);
2125 break;
2126 /* SSRO-format */
2127 case OPC1_16_SSRO_ST_A:
2128 r1 = MASK_OP_SSRO_S1(ctx->opcode);
2129 const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
2130 gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
2131 break;
2132 case OPC1_16_SSRO_ST_B:
2133 r1 = MASK_OP_SSRO_S1(ctx->opcode);
2134 const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
2135 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
2136 break;
2137 case OPC1_16_SSRO_ST_H:
2138 r1 = MASK_OP_SSRO_S1(ctx->opcode);
2139 const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
2140 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
2141 break;
2142 case OPC1_16_SSRO_ST_W:
2143 r1 = MASK_OP_SSRO_S1(ctx->opcode);
2144 const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
2145 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
2146 break;
2147 /* SR-format */
2148 case OPCM_16_SR_SYSTEM:
2149 decode_sr_system(env, ctx);
2150 break;
2151 case OPCM_16_SR_ACCU:
2152 decode_sr_accu(env, ctx);
2153 break;
2154 case OPC1_16_SR_JI:
2155 r1 = MASK_OP_SR_S1D(ctx->opcode);
2156 gen_compute_branch(ctx, op1, r1, 0, 0, 0);
2157 break;
2158 case OPC1_16_SR_NOT:
2159 r1 = MASK_OP_SR_S1D(ctx->opcode);
2160 tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
2161 break;
2166 * 32 bit instructions
2169 /* ABS-format */
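/* ABS-format instructions build their effective address with
   EA_ABS_FORMAT: off18[17:14] become address bits 31:28 and off18[13:0]
   the lower 14 bits */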
2170 static void decode_abs_ldw(CPUTriCoreState *env, DisasContext *ctx)
2172 int32_t op2;
2173 int32_t r1;
2174 uint32_t address;
2175 TCGv temp;
2177 r1 = MASK_OP_ABS_S1D(ctx->opcode);
2178 address = MASK_OP_ABS_OFF18(ctx->opcode);
2179 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2181 temp = tcg_const_i32(EA_ABS_FORMAT(address));
2183 switch (op2) {
2184 case OPC2_32_ABS_LD_A:
2185 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
2186 break;
2187 case OPC2_32_ABS_LD_D:
2188 gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
2189 break;
2190 case OPC2_32_ABS_LD_DA:
2191 gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
2192 break;
2193 case OPC2_32_ABS_LD_W:
2194 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
2195 break;
2198 tcg_temp_free(temp);
2201 static void decode_abs_ldb(CPUTriCoreState *env, DisasContext *ctx)
2203 int32_t op2;
2204 int32_t r1;
2205 uint32_t address;
2206 TCGv temp;
2208 r1 = MASK_OP_ABS_S1D(ctx->opcode);
2209 address = MASK_OP_ABS_OFF18(ctx->opcode);
2210 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2212 temp = tcg_const_i32(EA_ABS_FORMAT(address));
2214 switch (op2) {
2215 case OPC2_32_ABS_LD_B:
2216 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB);
2217 break;
2218 case OPC2_32_ABS_LD_BU:
2219 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
2220 break;
2221 case OPC2_32_ABS_LD_H:
2222 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW);
2223 break;
2224 case OPC2_32_ABS_LD_HU:
2225 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
2226 break;
2229 tcg_temp_free(temp);
2232 static void decode_abs_ldst_swap(CPUTriCoreState *env, DisasContext *ctx)
2234 int32_t op2;
2235 int32_t r1;
2236 uint32_t address;
2237 TCGv temp;
2239 r1 = MASK_OP_ABS_S1D(ctx->opcode);
2240 address = MASK_OP_ABS_OFF18(ctx->opcode);
2241 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2243 temp = tcg_const_i32(EA_ABS_FORMAT(address));
2245 switch (op2) {
2246 case OPC2_32_ABS_LDMST:
2247 gen_ldmst(ctx, r1, temp);
2248 break;
2249 case OPC2_32_ABS_SWAP_W:
2250 gen_swap(ctx, r1, temp);
2251 break;
2254 tcg_temp_free(temp);
2257 static void decode_abs_ldst_context(CPUTriCoreState *env, DisasContext *ctx)
2259 uint32_t op2;
2260 int32_t off18;
2262 off18 = MASK_OP_ABS_OFF18(ctx->opcode);
2263 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2265 switch (op2) {
2266 case OPC2_32_ABS_LDLCX:
2267 gen_helper_1arg(ldlcx, EA_ABS_FORMAT(off18));
2268 break;
2269 case OPC2_32_ABS_LDUCX:
2270 gen_helper_1arg(lducx, EA_ABS_FORMAT(off18));
2271 break;
2272 case OPC2_32_ABS_STLCX:
2273 gen_helper_1arg(stlcx, EA_ABS_FORMAT(off18));
2274 break;
2275 case OPC2_32_ABS_STUCX:
2276 gen_helper_1arg(stucx, EA_ABS_FORMAT(off18));
2277 break;
2281 static void decode_abs_store(CPUTriCoreState *env, DisasContext *ctx)
2283 int32_t op2;
2284 int32_t r1;
2285 uint32_t address;
2286 TCGv temp;
2288 r1 = MASK_OP_ABS_S1D(ctx->opcode);
2289 address = MASK_OP_ABS_OFF18(ctx->opcode);
2290 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2292 temp = tcg_const_i32(EA_ABS_FORMAT(address));
2294 switch (op2) {
2295 case OPC2_32_ABS_ST_A:
2296 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
2297 break;
2298 case OPC2_32_ABS_ST_D:
2299 gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
2300 break;
2301 case OPC2_32_ABS_ST_DA:
2302 gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
2303 break;
2304 case OPC2_32_ABS_ST_W:
2305 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
2306 break;
2309 tcg_temp_free(temp);
2312 static void decode_abs_storeb_h(CPUTriCoreState *env, DisasContext *ctx)
2314 int32_t op2;
2315 int32_t r1;
2316 uint32_t address;
2317 TCGv temp;
2319 r1 = MASK_OP_ABS_S1D(ctx->opcode);
2320 address = MASK_OP_ABS_OFF18(ctx->opcode);
2321 op2 = MASK_OP_ABS_OP2(ctx->opcode);
2323 temp = tcg_const_i32(EA_ABS_FORMAT(address));
2325 switch (op2) {
2326 case OPC2_32_ABS_ST_B:
2327 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
2328 break;
2329 case OPC2_32_ABS_ST_H:
2330 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
2331 break;
2333 tcg_temp_free(temp);
2336 /* Bit-format */
2338 static void decode_bit_andacc(CPUTriCoreState *env, DisasContext *ctx)
2340 uint32_t op2;
2341 int r1, r2, r3;
2342 int pos1, pos2;
2344 r1 = MASK_OP_BIT_S1(ctx->opcode);
2345 r2 = MASK_OP_BIT_S2(ctx->opcode);
2346 r3 = MASK_OP_BIT_D(ctx->opcode);
2347 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2348 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2349 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2352 switch (op2) {
2353 case OPC2_32_BIT_AND_AND_T:
2354 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2355 pos1, pos2, &tcg_gen_and_tl, &tcg_gen_and_tl);
2356 break;
2357 case OPC2_32_BIT_AND_ANDN_T:
2358 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2359 pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
2360 break;
2361 case OPC2_32_BIT_AND_NOR_T:
2362 if (TCG_TARGET_HAS_andc_i32) {
2363 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2364 pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
2365 } else {
2366 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2367 pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_and_tl);
2369 break;
2370 case OPC2_32_BIT_AND_OR_T:
2371 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2372 pos1, pos2, &tcg_gen_or_tl, &tcg_gen_and_tl);
2373 break;
2377 static void decode_bit_logical_t(CPUTriCoreState *env, DisasContext *ctx)
2379 uint32_t op2;
2380 int r1, r2, r3;
2381 int pos1, pos2;
2382 r1 = MASK_OP_BIT_S1(ctx->opcode);
2383 r2 = MASK_OP_BIT_S2(ctx->opcode);
2384 r3 = MASK_OP_BIT_D(ctx->opcode);
2385 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2386 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2387 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2389 switch (op2) {
2390 case OPC2_32_BIT_AND_T:
2391 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2392 pos1, pos2, &tcg_gen_and_tl);
2393 break;
2394 case OPC2_32_BIT_ANDN_T:
2395 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2396 pos1, pos2, &tcg_gen_andc_tl);
2397 break;
2398 case OPC2_32_BIT_NOR_T:
2399 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2400 pos1, pos2, &tcg_gen_nor_tl);
2401 break;
2402 case OPC2_32_BIT_OR_T:
2403 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2404 pos1, pos2, &tcg_gen_or_tl);
2405 break;
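/* Single bit insert: extract bit pos2 of D[s2] (inverted for INSN.T) and
   deposit it at bit pos1 of D[s1], writing the result to D[d] */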
2409 static void decode_bit_insert(CPUTriCoreState *env, DisasContext *ctx)
2411 uint32_t op2;
2412 int r1, r2, r3;
2413 int pos1, pos2;
2414 TCGv temp;
2415 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2416 r1 = MASK_OP_BIT_S1(ctx->opcode);
2417 r2 = MASK_OP_BIT_S2(ctx->opcode);
2418 r3 = MASK_OP_BIT_D(ctx->opcode);
2419 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2420 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2422 temp = tcg_temp_new();
2424 tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2);
2425 if (op2 == OPC2_32_BIT_INSN_T) {
2426 tcg_gen_not_tl(temp, temp);
2428 tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
2429 tcg_temp_free(temp);
2432 static void decode_bit_logical_t2(CPUTriCoreState *env, DisasContext *ctx)
2434 uint32_t op2;
2436 int r1, r2, r3;
2437 int pos1, pos2;
2439 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2440 r1 = MASK_OP_BIT_S1(ctx->opcode);
2441 r2 = MASK_OP_BIT_S2(ctx->opcode);
2442 r3 = MASK_OP_BIT_D(ctx->opcode);
2443 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2444 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2446 switch (op2) {
2447 case OPC2_32_BIT_NAND_T:
2448 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2449 pos1, pos2, &tcg_gen_nand_tl);
2450 break;
2451 case OPC2_32_BIT_ORN_T:
2452 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2453 pos1, pos2, &tcg_gen_orc_tl);
2454 break;
2455 case OPC2_32_BIT_XNOR_T:
2456 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2457 pos1, pos2, &tcg_gen_eqv_tl);
2458 break;
2459 case OPC2_32_BIT_XOR_T:
2460 gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2461 pos1, pos2, &tcg_gen_xor_tl);
2462 break;
2466 static void decode_bit_orand(CPUTriCoreState *env, DisasContext *ctx)
2468 uint32_t op2;
2470 int r1, r2, r3;
2471 int pos1, pos2;
2473 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2474 r1 = MASK_OP_BIT_S1(ctx->opcode);
2475 r2 = MASK_OP_BIT_S2(ctx->opcode);
2476 r3 = MASK_OP_BIT_D(ctx->opcode);
2477 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2478 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2480 switch (op2) {
2481 case OPC2_32_BIT_OR_AND_T:
2482 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2483 pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl);
2484 break;
2485 case OPC2_32_BIT_OR_ANDN_T:
2486 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2487 pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
2488 break;
2489 case OPC2_32_BIT_OR_NOR_T:
2490 if (TCG_TARGET_HAS_orc_i32) {
2491 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2492 pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
2493 } else {
2494 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2495 pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl);
2497 break;
2498 case OPC2_32_BIT_OR_OR_T:
2499 gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
2500 pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl);
2501 break;
2505 static void decode_bit_sh_logic1(CPUTriCoreState *env, DisasContext *ctx)
2507 uint32_t op2;
2508 int r1, r2, r3;
2509 int pos1, pos2;
2510 TCGv temp;
2512 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2513 r1 = MASK_OP_BIT_S1(ctx->opcode);
2514 r2 = MASK_OP_BIT_S2(ctx->opcode);
2515 r3 = MASK_OP_BIT_D(ctx->opcode);
2516 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2517 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2519 temp = tcg_temp_new();
2521 switch (op2) {
2522 case OPC2_32_BIT_SH_AND_T:
2523 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2524 pos1, pos2, &tcg_gen_and_tl);
2525 break;
2526 case OPC2_32_BIT_SH_ANDN_T:
2527 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2528 pos1, pos2, &tcg_gen_andc_tl);
2529 break;
2530 case OPC2_32_BIT_SH_NOR_T:
2531 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2532 pos1, pos2, &tcg_gen_nor_tl);
2533 break;
2534 case OPC2_32_BIT_SH_OR_T:
2535 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2536 pos1, pos2, &tcg_gen_or_tl);
2537 break;
2539 tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
2540 tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
2541 tcg_temp_free(temp);
2544 static void decode_bit_sh_logic2(CPUTriCoreState *env, DisasContext *ctx)
2546 uint32_t op2;
2547 int r1, r2, r3;
2548 int pos1, pos2;
2549 TCGv temp;
2551 op2 = MASK_OP_BIT_OP2(ctx->opcode);
2552 r1 = MASK_OP_BIT_S1(ctx->opcode);
2553 r2 = MASK_OP_BIT_S2(ctx->opcode);
2554 r3 = MASK_OP_BIT_D(ctx->opcode);
2555 pos1 = MASK_OP_BIT_POS1(ctx->opcode);
2556 pos2 = MASK_OP_BIT_POS2(ctx->opcode);
2558 temp = tcg_temp_new();
2560 switch (op2) {
2561 case OPC2_32_BIT_SH_NAND_T:
2562 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2563 pos1, pos2, &tcg_gen_nand_tl);
2564 break;
2565 case OPC2_32_BIT_SH_ORN_T:
2566 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2567 pos1, pos2, &tcg_gen_orc_tl);
2568 break;
2569 case OPC2_32_BIT_SH_XNOR_T:
2570 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2571 pos1, pos2, &tcg_gen_eqv_tl);
2572 break;
2573 case OPC2_32_BIT_SH_XOR_T:
2574 gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
2575 pos1, pos2, &tcg_gen_xor_tl);
2576 break;
2578 tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
2579 tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
2580 tcg_temp_free(temp);
2583 /* BO-format */
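/* BO-format loads/stores: base address A[s2] with a sign extended 10 bit
   offset, in short-offset, post-increment and pre-increment variants */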
2586 static void decode_bo_addrmode_post_pre_base(CPUTriCoreState *env,
2587 DisasContext *ctx)
2589 uint32_t op2;
2590 uint32_t off10;
2591 int32_t r1, r2;
2592 TCGv temp;
2594 r1 = MASK_OP_BO_S1D(ctx->opcode);
2595 r2 = MASK_OP_BO_S2(ctx->opcode);
2596 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
2597 op2 = MASK_OP_BO_OP2(ctx->opcode);
2599 switch (op2) {
2600 case OPC2_32_BO_CACHEA_WI_SHORTOFF:
2601 case OPC2_32_BO_CACHEA_W_SHORTOFF:
2602 case OPC2_32_BO_CACHEA_I_SHORTOFF:
2603 /* instruction to access the cache */
2604 break;
2605 case OPC2_32_BO_CACHEA_WI_POSTINC:
2606 case OPC2_32_BO_CACHEA_W_POSTINC:
2607 case OPC2_32_BO_CACHEA_I_POSTINC:
2608 /* instruction to access the cache, but we still need to handle
2609 the addressing mode */
2610 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2611 break;
2612 case OPC2_32_BO_CACHEA_WI_PREINC:
2613 case OPC2_32_BO_CACHEA_W_PREINC:
2614 case OPC2_32_BO_CACHEA_I_PREINC:
2615 /* instruction to access the cache, but we still need to handle
2616 the addressing mode */
2617 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2618 break;
2619 case OPC2_32_BO_CACHEI_WI_SHORTOFF:
2620 case OPC2_32_BO_CACHEI_W_SHORTOFF:
2621 /* TODO: Raise illegal opcode trap,
2622 if !tricore_feature(TRICORE_FEATURE_131) */
2623 break;
2624 case OPC2_32_BO_CACHEI_W_POSTINC:
2625 case OPC2_32_BO_CACHEI_WI_POSTINC:
2626 if (tricore_feature(env, TRICORE_FEATURE_131)) {
2627 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2628 } /* TODO: else raise illegal opcode trap */
2629 break;
2630 case OPC2_32_BO_CACHEI_W_PREINC:
2631 case OPC2_32_BO_CACHEI_WI_PREINC:
2632 if (tricore_feature(env, TRICORE_FEATURE_131)) {
2633 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2634 } /* TODO: else raise illegal opcode trap */
2635 break;
2636 case OPC2_32_BO_ST_A_SHORTOFF:
2637 gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
2638 break;
2639 case OPC2_32_BO_ST_A_POSTINC:
2640 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
2641 MO_LESL);
2642 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2643 break;
2644 case OPC2_32_BO_ST_A_PREINC:
2645 gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
2646 break;
2647 case OPC2_32_BO_ST_B_SHORTOFF:
2648 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
2649 break;
2650 case OPC2_32_BO_ST_B_POSTINC:
2651 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
2652 MO_UB);
2653 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2654 break;
2655 case OPC2_32_BO_ST_B_PREINC:
2656 gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
2657 break;
2658 case OPC2_32_BO_ST_D_SHORTOFF:
2659 gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
2660 off10, ctx);
2661 break;
2662 case OPC2_32_BO_ST_D_POSTINC:
2663 gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
2664 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2665 break;
2666 case OPC2_32_BO_ST_D_PREINC:
2667 temp = tcg_temp_new();
2668 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
2669 gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
2670 tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
2671 tcg_temp_free(temp);
2672 break;
2673 case OPC2_32_BO_ST_DA_SHORTOFF:
2674 gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
2675 off10, ctx);
2676 break;
2677 case OPC2_32_BO_ST_DA_POSTINC:
2678 gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
2679 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2680 break;
2681 case OPC2_32_BO_ST_DA_PREINC:
2682 temp = tcg_temp_new();
2683 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
2684 gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
2685 tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
2686 tcg_temp_free(temp);
2687 break;
2688 case OPC2_32_BO_ST_H_SHORTOFF:
2689 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
2690 break;
2691 case OPC2_32_BO_ST_H_POSTINC:
2692 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
2693 MO_LEUW);
2694 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2695 break;
2696 case OPC2_32_BO_ST_H_PREINC:
2697 gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
2698 break;
2699 case OPC2_32_BO_ST_Q_SHORTOFF:
2700 temp = tcg_temp_new();
2701 tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
2702 gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
2703 tcg_temp_free(temp);
2704 break;
2705 case OPC2_32_BO_ST_Q_POSTINC:
2706 temp = tcg_temp_new();
2707 tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
2708 tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx,
2709 MO_LEUW);
2710 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2711 tcg_temp_free(temp);
2712 break;
2713 case OPC2_32_BO_ST_Q_PREINC:
2714 temp = tcg_temp_new();
2715 tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
2716 gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
2717 tcg_temp_free(temp);
2718 break;
2719 case OPC2_32_BO_ST_W_SHORTOFF:
2720 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
2721 break;
2722 case OPC2_32_BO_ST_W_POSTINC:
2723 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
2724 MO_LEUL);
2725 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2726 break;
2727 case OPC2_32_BO_ST_W_PREINC:
2728 gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
2729 break;
2733 static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState *env,
2734 DisasContext *ctx)
2736 uint32_t op2;
2737 uint32_t off10;
2738 int32_t r1, r2;
2739 TCGv temp, temp2, temp3;
2741 r1 = MASK_OP_BO_S1D(ctx->opcode);
2742 r2 = MASK_OP_BO_S2(ctx->opcode);
2743 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
2744 op2 = MASK_OP_BO_OP2(ctx->opcode);
2746 temp = tcg_temp_new();
2747 temp2 = tcg_temp_new();
2748 temp3 = tcg_const_i32(off10);
2750 tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
2751 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
2753 switch (op2) {
2754 case OPC2_32_BO_CACHEA_WI_BR:
2755 case OPC2_32_BO_CACHEA_W_BR:
2756 case OPC2_32_BO_CACHEA_I_BR:
2757 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
2758 break;
2759 case OPC2_32_BO_CACHEA_WI_CIRC:
2760 case OPC2_32_BO_CACHEA_W_CIRC:
2761 case OPC2_32_BO_CACHEA_I_CIRC:
2762 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
2763 break;
2764 case OPC2_32_BO_ST_A_BR:
2765 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
2766 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
2767 break;
2768 case OPC2_32_BO_ST_A_CIRC:
2769 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
2770 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
2771 break;
2772 case OPC2_32_BO_ST_B_BR:
2773 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
2774 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
2775 break;
2776 case OPC2_32_BO_ST_B_CIRC:
2777 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
2778 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
2779 break;
2780 case OPC2_32_BO_ST_D_BR:
2781 gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
2782 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
2783 break;
2784 case OPC2_32_BO_ST_D_CIRC:
2785 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
2786 tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
2787 tcg_gen_addi_tl(temp, temp, 4);
2788 tcg_gen_rem_tl(temp, temp, temp2);
2789 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
2790 tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
2791 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
2792 break;
2793 case OPC2_32_BO_ST_DA_BR:
2794 gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
2795 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
2796 break;
2797 case OPC2_32_BO_ST_DA_CIRC:
2798 tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
2799 tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
2800 tcg_gen_addi_tl(temp, temp, 4);
2801 tcg_gen_rem_tl(temp, temp, temp2);
2802 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
2803 tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
2804 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
2805 break;
2806 case OPC2_32_BO_ST_H_BR:
2807 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
2808 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
2809 break;
2810 case OPC2_32_BO_ST_H_CIRC:
2811 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
2812 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
2813 break;
2814 case OPC2_32_BO_ST_Q_BR:
2815 tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
2816 tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
2817 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
2818 break;
2819 case OPC2_32_BO_ST_Q_CIRC:
2820 tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
2821 tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
2822 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
2823 break;
2824 case OPC2_32_BO_ST_W_BR:
2825 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
2826 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
2827 break;
2828 case OPC2_32_BO_ST_W_CIRC:
2829 tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
2830 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
2831 break;
2833 tcg_temp_free(temp);
2834 tcg_temp_free(temp2);
2835 tcg_temp_free(temp3);
2838 static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState *env,
2839 DisasContext *ctx)
2841 uint32_t op2;
2842 uint32_t off10;
2843 int32_t r1, r2;
2844 TCGv temp;
2846 r1 = MASK_OP_BO_S1D(ctx->opcode);
2847 r2 = MASK_OP_BO_S2(ctx->opcode);
2848 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
2849 op2 = MASK_OP_BO_OP2(ctx->opcode);
2851 switch (op2) {
2852 case OPC2_32_BO_LD_A_SHORTOFF:
2853 gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
2854 break;
2855 case OPC2_32_BO_LD_A_POSTINC:
2856 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
2857 MO_LEUL);
2858 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2859 break;
2860 case OPC2_32_BO_LD_A_PREINC:
2861 gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
2862 break;
2863 case OPC2_32_BO_LD_B_SHORTOFF:
2864 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
2865 break;
2866 case OPC2_32_BO_LD_B_POSTINC:
2867 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
2868 MO_SB);
2869 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2870 break;
2871 case OPC2_32_BO_LD_B_PREINC:
2872 gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
2873 break;
2874 case OPC2_32_BO_LD_BU_SHORTOFF:
2875 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
2876 break;
2877 case OPC2_32_BO_LD_BU_POSTINC:
2878 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
2879 MO_UB);
2880 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2881 break;
2882 case OPC2_32_BO_LD_BU_PREINC:
2883 gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
2884 break;
2885 case OPC2_32_BO_LD_D_SHORTOFF:
2886 gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
2887 off10, ctx);
2888 break;
2889 case OPC2_32_BO_LD_D_POSTINC:
2890 gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
2891 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2892 break;
2893 case OPC2_32_BO_LD_D_PREINC:
2894 temp = tcg_temp_new();
2895 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
2896 gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
2897 tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
2898 tcg_temp_free(temp);
2899 break;
2900 case OPC2_32_BO_LD_DA_SHORTOFF:
2901 gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
2902 off10, ctx);
2903 break;
2904 case OPC2_32_BO_LD_DA_POSTINC:
2905 gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
2906 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2907 break;
2908 case OPC2_32_BO_LD_DA_PREINC:
2909 temp = tcg_temp_new();
2910 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
2911 gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
2912 tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
2913 tcg_temp_free(temp);
2914 break;
2915 case OPC2_32_BO_LD_H_SHORTOFF:
2916 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
2917 break;
2918 case OPC2_32_BO_LD_H_POSTINC:
2919 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
2920 MO_LESW);
2921 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2922 break;
2923 case OPC2_32_BO_LD_H_PREINC:
2924 gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
2925 break;
2926 case OPC2_32_BO_LD_HU_SHORTOFF:
2927 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
2928 break;
2929 case OPC2_32_BO_LD_HU_POSTINC:
2930 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
2931 MO_LEUW);
2932 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2933 break;
2934 case OPC2_32_BO_LD_HU_PREINC:
2935 gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
2936 break;
2937 case OPC2_32_BO_LD_Q_SHORTOFF:
2938 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
2939 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
2940 break;
2941 case OPC2_32_BO_LD_Q_POSTINC:
2942 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
2943 MO_LEUW);
2944 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
2945 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2946 break;
2947 case OPC2_32_BO_LD_Q_PREINC:
2948 gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
2949 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
2950 break;
2951 case OPC2_32_BO_LD_W_SHORTOFF:
2952 gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
2953 break;
2954 case OPC2_32_BO_LD_W_POSTINC:
2955 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
2956 MO_LEUL);
2957 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
2958 break;
2959 case OPC2_32_BO_LD_W_PREINC:
2960 gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
2961 break;
2965 static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState *env,
2966 DisasContext *ctx)
2968 uint32_t op2;
2969 uint32_t off10;
2970 int r1, r2;
2972 TCGv temp, temp2, temp3;
2974 r1 = MASK_OP_BO_S1D(ctx->opcode);
2975 r2 = MASK_OP_BO_S2(ctx->opcode);
2976 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
2977 op2 = MASK_OP_BO_OP2(ctx->opcode);
2979 temp = tcg_temp_new();
2980 temp2 = tcg_temp_new();
2981 temp3 = tcg_const_i32(off10);
2983 tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
2984 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
2987 switch (op2) {
2988 case OPC2_32_BO_LD_A_BR:
2989 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
2990 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
2991 break;
2992 case OPC2_32_BO_LD_A_CIRC:
2993 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
2994 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
2995 break;
2996 case OPC2_32_BO_LD_B_BR:
2997 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
2998 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
2999 break;
3000 case OPC2_32_BO_LD_B_CIRC:
3001 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
3002 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3003 break;
3004 case OPC2_32_BO_LD_BU_BR:
3005 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
3006 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3007 break;
3008 case OPC2_32_BO_LD_BU_CIRC:
3009 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
3010 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3011 break;
3012 case OPC2_32_BO_LD_D_BR:
3013 gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
3014 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3015 break;
3016 case OPC2_32_BO_LD_D_CIRC:
3017 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
3018 tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
3019 tcg_gen_addi_tl(temp, temp, 4);
3020 tcg_gen_rem_tl(temp, temp, temp2);
3021 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
3022 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
3023 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3024 break;
3025 case OPC2_32_BO_LD_DA_BR:
3026 gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
3027 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3028 break;
3029 case OPC2_32_BO_LD_DA_CIRC:
3030 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
3031 tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
3032 tcg_gen_addi_tl(temp, temp, 4);
3033 tcg_gen_rem_tl(temp, temp, temp2);
3034 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
3035 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
3036 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3037 break;
3038 case OPC2_32_BO_LD_H_BR:
3039 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
3040 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3041 break;
3042 case OPC2_32_BO_LD_H_CIRC:
3043 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
3044 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3045 break;
3046 case OPC2_32_BO_LD_HU_BR:
3047 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
3048 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3049 break;
3050 case OPC2_32_BO_LD_HU_CIRC:
3051 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
3052 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3053 break;
3054 case OPC2_32_BO_LD_Q_BR:
3055 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
3056 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
3057 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3058 break;
3059 case OPC2_32_BO_LD_Q_CIRC:
3060 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
3061 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
3062 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3063 break;
3064 case OPC2_32_BO_LD_W_BR:
3065 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
3066 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3067 break;
3068 case OPC2_32_BO_LD_W_CIRC:
3069 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
3070 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3071 break;
3073 tcg_temp_free(temp);
3074 tcg_temp_free(temp2);
3075 tcg_temp_free(temp3);
3078 static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState *env,
3079 DisasContext *ctx)
3081 uint32_t op2;
3082 uint32_t off10;
3083 int r1, r2;
3085 TCGv temp, temp2;
3087 r1 = MASK_OP_BO_S1D(ctx->opcode);
3088 r2 = MASK_OP_BO_S2(ctx->opcode);
3089 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
3090 op2 = MASK_OP_BO_OP2(ctx->opcode);
3093 temp = tcg_temp_new();
3094 temp2 = tcg_temp_new();
3096 switch (op2) {
3097 case OPC2_32_BO_LDLCX_SHORTOFF:
3098 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3099 gen_helper_ldlcx(cpu_env, temp);
3100 break;
3101 case OPC2_32_BO_LDMST_SHORTOFF:
3102 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3103 gen_ldmst(ctx, r1, temp);
3104 break;
3105 case OPC2_32_BO_LDMST_POSTINC:
3106 gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
3107 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3108 break;
3109 case OPC2_32_BO_LDMST_PREINC:
3110 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3111 gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
3112 break;
3113 case OPC2_32_BO_LDUCX_SHORTOFF:
3114 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3115 gen_helper_lducx(cpu_env, temp);
3116 break;
3117 case OPC2_32_BO_LEA_SHORTOFF:
3118 tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
3119 break;
3120 case OPC2_32_BO_STLCX_SHORTOFF:
3121 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3122 gen_helper_stlcx(cpu_env, temp);
3123 break;
3124 case OPC2_32_BO_STUCX_SHORTOFF:
3125 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3126 gen_helper_stucx(cpu_env, temp);
3127 break;
3128 case OPC2_32_BO_SWAP_W_SHORTOFF:
3129 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
3130 gen_swap(ctx, r1, temp);
3131 break;
3132 case OPC2_32_BO_SWAP_W_POSTINC:
3133 gen_swap(ctx, r1, cpu_gpr_a[r2]);
3134 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3135 break;
3136 case OPC2_32_BO_SWAP_W_PREINC:
3137 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
3138 gen_swap(ctx, r1, cpu_gpr_a[r2]);
3139 break;
3141 tcg_temp_free(temp);
3142 tcg_temp_free(temp2);
3145 static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState *env,
3146 DisasContext *ctx)
3148 uint32_t op2;
3149 uint32_t off10;
3150 int r1, r2;
3152 TCGv temp, temp2, temp3;
3154 r1 = MASK_OP_BO_S1D(ctx->opcode);
3155 r2 = MASK_OP_BO_S2(ctx->opcode);
3156 off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
3157 op2 = MASK_OP_BO_OP2(ctx->opcode);
3159 temp = tcg_temp_new();
3160 temp2 = tcg_temp_new();
3161 temp3 = tcg_const_i32(off10);
3163 tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
3164 tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
3166 switch (op2) {
3167 case OPC2_32_BO_LDMST_BR:
3168 gen_ldmst(ctx, r1, temp2);
3169 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3170 break;
3171 case OPC2_32_BO_LDMST_CIRC:
3172 gen_ldmst(ctx, r1, temp2);
3173 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3174 break;
3175 case OPC2_32_BO_SWAP_W_BR:
3176 gen_swap(ctx, r1, temp2);
3177 gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
3178 break;
3179 case OPC2_32_BO_SWAP_W_CIRC:
3180 gen_swap(ctx, r1, temp2);
3181 gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
3182 break;
3184 tcg_temp_free(temp);
3185 tcg_temp_free(temp2);
3186 tcg_temp_free(temp3);
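/* BOL-format: long-offset addressing, sign extended 16 bit offset added
   to A[s2] */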
3189 static void decode_bol_opc(CPUTriCoreState *env, DisasContext *ctx, int32_t op1)
3191 int r1, r2;
3192 int32_t address;
3193 TCGv temp;
3195 r1 = MASK_OP_BOL_S1D(ctx->opcode);
3196 r2 = MASK_OP_BOL_S2(ctx->opcode);
3197 address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode);
3199 switch (op1) {
3200 case OPC1_32_BOL_LD_A_LONGOFF:
3201 temp = tcg_temp_new();
3202 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
3203 tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
3204 tcg_temp_free(temp);
3205 break;
3206 case OPC1_32_BOL_LD_W_LONFOFF:
3207 temp = tcg_temp_new();
3208 tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
3209 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
3210 tcg_temp_free(temp);
3211 break;
3212 case OPC1_32_BOL_LEA_LONGOFF:
3213 tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
3214 break;
3215 case OPC1_32_BOL_ST_A_LONGOFF:
3216 if (tricore_feature(env, TRICORE_FEATURE_16)) {
3217 gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], address, MO_LEUL);
3218 } else {
3219 /* raise illegal opcode trap */
3221 break;
3222 case OPC1_32_BOL_ST_W_LONGOFF:
3223 gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUL);
3224 break;
3229 /* RC format */
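/* RC-format: operations of D[s1] with a 9 bit constant; the logical and
   shift decoder reads it zero extended, the arithmetic decoders sign
   extend it and re-read it zero extended for the unsigned variants */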
3230 static void decode_rc_logical_shift(CPUTriCoreState *env, DisasContext *ctx)
3232 uint32_t op2;
3233 int r1, r2;
3234 int32_t const9;
3235 TCGv temp;
3237 r2 = MASK_OP_RC_D(ctx->opcode);
3238 r1 = MASK_OP_RC_S1(ctx->opcode);
3239 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3240 op2 = MASK_OP_RC_OP2(ctx->opcode);
3242 temp = tcg_temp_new();
3244 switch (op2) {
3245 case OPC2_32_RC_AND:
3246 tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3247 break;
3248 case OPC2_32_RC_ANDN:
3249 tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
3250 break;
3251 case OPC2_32_RC_NAND:
3252 tcg_gen_movi_tl(temp, const9);
3253 tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
3254 break;
3255 case OPC2_32_RC_NOR:
3256 tcg_gen_movi_tl(temp, const9);
3257 tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
3258 break;
3259 case OPC2_32_RC_OR:
3260 tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3261 break;
3262 case OPC2_32_RC_ORN:
3263 tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
3264 break;
3265 case OPC2_32_RC_SH:
3266 const9 = sextract32(const9, 0, 6);
3267 gen_shi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3268 break;
3269 case OPC2_32_RC_SH_H:
3270 const9 = sextract32(const9, 0, 5);
3271 gen_sh_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3272 break;
3273 case OPC2_32_RC_SHA:
3274 const9 = sextract32(const9, 0, 6);
3275 gen_shaci(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3276 break;
3277 case OPC2_32_RC_SHA_H:
3278 const9 = sextract32(const9, 0, 5);
3279 gen_sha_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3280 break;
3281 case OPC2_32_RC_SHAS:
3282 gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3283 break;
3284 case OPC2_32_RC_XNOR:
3285 tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3286 tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]);
3287 break;
3288 case OPC2_32_RC_XOR:
3289 tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3290 break;
3292 tcg_temp_free(temp);
3295 static void decode_rc_accumulator(CPUTriCoreState *env, DisasContext *ctx)
3297 uint32_t op2;
3298 int r1, r2;
3299 int16_t const9;
3301 TCGv temp;
3303 r2 = MASK_OP_RC_D(ctx->opcode);
3304 r1 = MASK_OP_RC_S1(ctx->opcode);
3305 const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);
3307 op2 = MASK_OP_RC_OP2(ctx->opcode);
3309 temp = tcg_temp_new();
3311 switch (op2) {
3312 case OPC2_32_RC_ABSDIF:
3313 gen_absdifi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3314 break;
3315 case OPC2_32_RC_ABSDIFS:
3316 gen_absdifsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3317 break;
3318 case OPC2_32_RC_ADD:
3319 gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3320 break;
3321 case OPC2_32_RC_ADDC:
3322 gen_addci_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3323 break;
3324 case OPC2_32_RC_ADDS:
3325 gen_addsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3326 break;
3327 case OPC2_32_RC_ADDS_U:
3328 gen_addsui(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3329 break;
3330 case OPC2_32_RC_ADDX:
3331 gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3332 break;
3333 case OPC2_32_RC_AND_EQ:
3334 gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
3335 const9, &tcg_gen_and_tl);
3336 break;
3337 case OPC2_32_RC_AND_GE:
3338 gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3339 const9, &tcg_gen_and_tl);
3340 break;
3341 case OPC2_32_RC_AND_GE_U:
3342 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3343 gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3344 const9, &tcg_gen_and_tl);
3345 break;
3346 case OPC2_32_RC_AND_LT:
3347 gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
3348 const9, &tcg_gen_and_tl);
3349 break;
3350 case OPC2_32_RC_AND_LT_U:
3351 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3352 gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3353 const9, &tcg_gen_and_tl);
3354 break;
3355 case OPC2_32_RC_AND_NE:
3356 gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3357 const9, &tcg_gen_and_tl);
3358 break;
3359 case OPC2_32_RC_EQ:
3360 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3361 break;
3362 case OPC2_32_RC_EQANY_B:
3363 gen_eqany_bi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3364 break;
3365 case OPC2_32_RC_EQANY_H:
3366 gen_eqany_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3367 break;
3368 case OPC2_32_RC_GE:
3369 tcg_gen_setcondi_tl(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3370 break;
3371 case OPC2_32_RC_GE_U:
3372 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3373 tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3374 break;
3375 case OPC2_32_RC_LT:
3376 tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3377 break;
3378 case OPC2_32_RC_LT_U:
3379 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3380 tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3381 break;
3382 case OPC2_32_RC_MAX:
3383 tcg_gen_movi_tl(temp, const9);
3384 tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
3385 cpu_gpr_d[r1], temp);
3386 break;
3387 case OPC2_32_RC_MAX_U:
3388 tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
3389 tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
3390 cpu_gpr_d[r1], temp);
3391 break;
3392 case OPC2_32_RC_MIN:
3393 tcg_gen_movi_tl(temp, const9);
3394 tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
3395 cpu_gpr_d[r1], temp);
3396 break;
3397 case OPC2_32_RC_MIN_U:
3398 tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
3399 tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
3400 cpu_gpr_d[r1], temp);
3401 break;
3402 case OPC2_32_RC_NE:
3403 tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3404 break;
3405 case OPC2_32_RC_OR_EQ:
3406 gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
3407 const9, &tcg_gen_or_tl);
3408 break;
3409 case OPC2_32_RC_OR_GE:
3410 gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3411 const9, &tcg_gen_or_tl);
3412 break;
3413 case OPC2_32_RC_OR_GE_U:
3414 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3415 gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3416 const9, &tcg_gen_or_tl);
3417 break;
3418 case OPC2_32_RC_OR_LT:
3419 gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
3420 const9, &tcg_gen_or_tl);
3421 break;
3422 case OPC2_32_RC_OR_LT_U:
3423 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3424 gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3425 const9, &tcg_gen_or_tl);
3426 break;
3427 case OPC2_32_RC_OR_NE:
3428 gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3429 const9, &tcg_gen_or_tl);
3430 break;
3431 case OPC2_32_RC_RSUB:
3432 tcg_gen_movi_tl(temp, const9);
3433 gen_sub_d(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
3434 break;
3435 case OPC2_32_RC_RSUBS:
3436 tcg_gen_movi_tl(temp, const9);
3437 gen_subs(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
3438 break;
3439 case OPC2_32_RC_RSUBS_U:
3440 tcg_gen_movi_tl(temp, const9);
3441 gen_subsu(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
3442 break;
3443 case OPC2_32_RC_SH_EQ:
3444 gen_sh_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3445 break;
3446 case OPC2_32_RC_SH_GE:
3447 gen_sh_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3448 break;
3449 case OPC2_32_RC_SH_GE_U:
3450 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3451 gen_sh_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3452 break;
3453 case OPC2_32_RC_SH_LT:
3454 gen_sh_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3455 break;
3456 case OPC2_32_RC_SH_LT_U:
3457 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3458 gen_sh_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3459 break;
3460 case OPC2_32_RC_SH_NE:
3461 gen_sh_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3462 break;
3463 case OPC2_32_RC_XOR_EQ:
3464 gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
3465 const9, &tcg_gen_xor_tl);
3466 break;
3467 case OPC2_32_RC_XOR_GE:
3468 gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3469 const9, &tcg_gen_xor_tl);
3470 break;
3471 case OPC2_32_RC_XOR_GE_U:
3472 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3473 gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3474 const9, &tcg_gen_xor_tl);
3475 break;
3476 case OPC2_32_RC_XOR_LT:
3477 gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
3478 const9, &tcg_gen_xor_tl);
3479 break;
3480 case OPC2_32_RC_XOR_LT_U:
3481 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3482 gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
3483 const9, &tcg_gen_xor_tl);
3484 break;
3485 case OPC2_32_RC_XOR_NE:
3486 gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
3487 const9, &tcg_gen_xor_tl);
3488 break;
3490 tcg_temp_free(temp);
3493 static void decode_rc_serviceroutine(CPUTriCoreState *env, DisasContext *ctx)
3495 uint32_t op2;
3496 uint32_t const9;
3498 op2 = MASK_OP_RC_OP2(ctx->opcode);
3499 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3501 switch (op2) {
3502 case OPC2_32_RC_BISR:
3503 gen_helper_1arg(bisr, const9);
3504 break;
3505 case OPC2_32_RC_SYSCALL:
3506 /* TODO: Add exception generation */
3507 break;
3511 static void decode_rc_mul(CPUTriCoreState *env, DisasContext *ctx)
3513 uint32_t op2;
3514 int r1, r2;
3515 int16_t const9;
3517 r2 = MASK_OP_RC_D(ctx->opcode);
3518 r1 = MASK_OP_RC_S1(ctx->opcode);
3519 const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);
3521 op2 = MASK_OP_RC_OP2(ctx->opcode);
3523 switch (op2) {
3524 case OPC2_32_RC_MUL_32:
3525 gen_muli_i32s(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3526 break;
3527 case OPC2_32_RC_MUL_64:
3528 gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
3529 break;
3530 case OPC2_32_RC_MULS_32:
3531 gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3532 break;
3533 case OPC2_32_RC_MUL_U_64:
3534 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3535 gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
3536 break;
3537 case OPC2_32_RC_MULS_U_32:
3538 const9 = MASK_OP_RC_CONST9(ctx->opcode);
3539 gen_mulsui_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
3540 break;
3544 /* RCPW format */
3545 static void decode_rcpw_insert(CPUTriCoreState *env, DisasContext *ctx)
3547 uint32_t op2;
3548 int r1, r2;
3549 int32_t pos, width, const4;
3551 TCGv temp;
3553 op2 = MASK_OP_RCPW_OP2(ctx->opcode);
3554 r1 = MASK_OP_RCPW_S1(ctx->opcode);
3555 r2 = MASK_OP_RCPW_D(ctx->opcode);
3556 const4 = MASK_OP_RCPW_CONST4(ctx->opcode);
3557 width = MASK_OP_RCPW_WIDTH(ctx->opcode);
3558 pos = MASK_OP_RCPW_POS(ctx->opcode);
3560 switch (op2) {
3561 case OPC2_32_RCPW_IMASK:
3562 /* if pos + width > 31 undefined result */
3563 if (pos + width <= 31) {
3564 tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos);
3565 tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos));
3567 break;
3568 case OPC2_32_RCPW_INSERT:
3569 /* if pos + width > 32 undefined result */
3570 if (pos + width <= 32) {
3571 temp = tcg_const_i32(const4);
3572 tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
3573 tcg_temp_free(temp);
3575 break;
3579 /* RCRW format */
3581 static void decode_rcrw_insert(CPUTriCoreState *env, DisasContext *ctx)
3583 uint32_t op2;
3584 int r1, r3, r4;
3585 int32_t width, const4;
3587 TCGv temp, temp2, temp3;
3589 op2 = MASK_OP_RCRW_OP2(ctx->opcode);
3590 r1 = MASK_OP_RCRW_S1(ctx->opcode);
3591 r3 = MASK_OP_RCRW_S3(ctx->opcode);
3592 r4 = MASK_OP_RCRW_D(ctx->opcode);
3593 width = MASK_OP_RCRW_WIDTH(ctx->opcode);
3594 const4 = MASK_OP_RCRW_CONST4(ctx->opcode);
3596 temp = tcg_temp_new();
3597 temp2 = tcg_temp_new();
3599 switch (op2) {
3600 case OPC2_32_RCRW_IMASK:
3601 tcg_gen_andi_tl(temp, cpu_gpr_d[r4], 0x1f);
3602 tcg_gen_movi_tl(temp2, (1 << width) - 1);
3603 tcg_gen_shl_tl(cpu_gpr_d[r3 + 1], temp2, temp);
3604 tcg_gen_movi_tl(temp2, const4);
3605 tcg_gen_shl_tl(cpu_gpr_d[r3], temp2, temp);
3606 break;
3607 case OPC2_32_RCRW_INSERT:
3608 temp3 = tcg_temp_new();
3610 tcg_gen_movi_tl(temp, width);
3611 tcg_gen_movi_tl(temp2, const4);
3612 tcg_gen_andi_tl(temp3, cpu_gpr_d[r4], 0x1f);
3613 gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp2, temp, temp3);
3615 tcg_temp_free(temp3);
3616 break;
3618 tcg_temp_free(temp);
3619 tcg_temp_free(temp2);
3622 /* RCR format */
3624 static void decode_rcr_cond_select(CPUTriCoreState *env, DisasContext *ctx)
3626 uint32_t op2;
3627 int r1, r3, r4;
3628 int32_t const9;
3630 TCGv temp, temp2;
3632 op2 = MASK_OP_RCR_OP2(ctx->opcode);
3633 r1 = MASK_OP_RCR_S1(ctx->opcode);
3634 const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
3635 r3 = MASK_OP_RCR_S3(ctx->opcode);
3636 r4 = MASK_OP_RCR_D(ctx->opcode);
3638 switch (op2) {
3639 case OPC2_32_RCR_CADD:
3640 gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
3641 cpu_gpr_d[r4]);
3642 break;
3643 case OPC2_32_RCR_CADDN:
3644 gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
3645 cpu_gpr_d[r4]);
3646 break;
3647 case OPC2_32_RCR_SEL:
3648 temp = tcg_const_i32(0);
3649 temp2 = tcg_const_i32(const9);
3650 tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r4], temp,
3651 cpu_gpr_d[r1], temp2);
3652 tcg_temp_free(temp);
3653 tcg_temp_free(temp2);
3654 break;
3655 case OPC2_32_RCR_SELN:
3656 temp = tcg_const_i32(0);
3657 temp2 = tcg_const_i32(const9);
3658 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r4], temp,
3659 cpu_gpr_d[r1], temp2);
3660 tcg_temp_free(temp);
3661 tcg_temp_free(temp2);
3662 break;
3666 static void decode_rcr_madd(CPUTriCoreState *env, DisasContext *ctx)
3668 uint32_t op2;
3669 int r1, r3, r4;
3670 int32_t const9;
3673 op2 = MASK_OP_RCR_OP2(ctx->opcode);
3674 r1 = MASK_OP_RCR_S1(ctx->opcode);
3675 const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
3676 r3 = MASK_OP_RCR_S3(ctx->opcode);
3677 r4 = MASK_OP_RCR_D(ctx->opcode);
3679 switch (op2) {
3680 case OPC2_32_RCR_MADD_32:
3681 gen_maddi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
3682 break;
3683 case OPC2_32_RCR_MADD_64:
3684 gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
3685 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
3686 break;
3687 case OPC2_32_RCR_MADDS_32:
3688 gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
3689 break;
3690 case OPC2_32_RCR_MADDS_64:
3691 gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
3692 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
3693 break;
3694 case OPC2_32_RCR_MADD_U_64:
3695 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
3696 gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
3697 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
3698 break;
3699 case OPC2_32_RCR_MADDS_U_32:
3700 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
3701 gen_maddsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
3702 break;
3703 case OPC2_32_RCR_MADDS_U_64:
3704 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
3705 gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
3706 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
3707 break;
3711 static void decode_rcr_msub(CPUTriCoreState *env, DisasContext *ctx)
3713 uint32_t op2;
3714 int r1, r3, r4;
3715 int32_t const9;
3718 op2 = MASK_OP_RCR_OP2(ctx->opcode);
3719 r1 = MASK_OP_RCR_S1(ctx->opcode);
3720 const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
3721 r3 = MASK_OP_RCR_S3(ctx->opcode);
3722 r4 = MASK_OP_RCR_D(ctx->opcode);
3724 switch (op2) {
3725 case OPC2_32_RCR_MSUB_32:
3726 gen_msubi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
3727 break;
3728 case OPC2_32_RCR_MSUB_64:
3729 gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
3730 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
3731 break;
3732 case OPC2_32_RCR_MSUBS_32:
3733 gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
3734 break;
3735 case OPC2_32_RCR_MSUBS_64:
3736 gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
3737 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
3738 break;
3739 case OPC2_32_RCR_MSUB_U_64:
3740 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
3741 gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
3742 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
3743 break;
3744 case OPC2_32_RCR_MSUBS_U_32:
3745 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
3746 gen_msubsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
3747 break;
3748 case OPC2_32_RCR_MSUBS_U_64:
3749 const9 = MASK_OP_RCR_CONST9(ctx->opcode);
3750 gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
3751 cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
3752 break;
3753 }
3754 }
3756 /* RLC format */
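/* RLC instructions combine one register with a long 16-bit constant.
   MOV.U re-reads the constant zero-extended; the ADDIH/MOV.H/MOVH.A
   forms shift it into the upper half-word; MFCR/MTCR use it as the
   core special function register offset. */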
3758 static void decode_rlc_opc(CPUTriCoreState *env, DisasContext *ctx,
3759 uint32_t op1)
3760 {
3761 int32_t const16;
3762 int r1, r2;
3764 const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode);
3765 r1 = MASK_OP_RLC_S1(ctx->opcode);
3766 r2 = MASK_OP_RLC_D(ctx->opcode);
3768 switch (op1) {
3769 case OPC1_32_RLC_ADDI:
3770 gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const16);
3771 break;
3772 case OPC1_32_RLC_ADDIH:
3773 gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16);
3774 break;
3775 case OPC1_32_RLC_ADDIH_A:
3776 tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
3777 break;
3778 case OPC1_32_RLC_MFCR:
3779 gen_mfcr(env, cpu_gpr_d[r2], const16);
3780 break;
3781 case OPC1_32_RLC_MOV:
3782 tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
3783 break;
3784 case OPC1_32_RLC_MOV_U:
3785 const16 = MASK_OP_RLC_CONST16(ctx->opcode);
3786 tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
3787 break;
3788 case OPC1_32_RLC_MOV_H:
3789 tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16);
3790 break;
3791 case OPC1_32_RLC_MOVH_A:
3792 tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16);
3793 break;
3794 case OPC1_32_RLC_MTCR:
3795 gen_mtcr(env, ctx, cpu_gpr_d[r2], const16);
3796 break;
3797 }
3798 }
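/* Top-level decoder for 32-bit instructions: dispatch on the major
   opcode and either handle the format inline or defer to the
   per-format decode_* helpers defined earlier in this file. */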
3800 static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
3801 {
3802 int op1;
3803 int32_t r1, r2, r3;
3804 int32_t address, const16;
3805 int8_t b, const4;
3806 int32_t bpos;
3807 TCGv temp, temp2, temp3;
3809 op1 = MASK_OP_MAJOR(ctx->opcode);
3811 /* handle the JNZ.T opcode only being 6 bits long */
3812 if (unlikely((op1 & 0x3f) == OPCM_32_BRN_JTT)) {
3813 op1 = OPCM_32_BRN_JTT;
3814 }
3816 switch (op1) {
3817 /* ABS-format */
3818 case OPCM_32_ABS_LDW:
3819 decode_abs_ldw(env, ctx);
3820 break;
3821 case OPCM_32_ABS_LDB:
3822 decode_abs_ldb(env, ctx);
3823 break;
3824 case OPCM_32_ABS_LDMST_SWAP:
3825 decode_abs_ldst_swap(env, ctx);
3826 break;
3827 case OPCM_32_ABS_LDST_CONTEXT:
3828 decode_abs_ldst_context(env, ctx);
3829 break;
3830 case OPCM_32_ABS_STORE:
3831 decode_abs_store(env, ctx);
3832 break;
3833 case OPCM_32_ABS_STOREB_H:
3834 decode_abs_storeb_h(env, ctx);
3835 break;
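/* ST.Q stores the upper half-word of D[r1] to the absolute address
   formed from the 18-bit offset. */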
3836 case OPC1_32_ABS_STOREQ:
3837 address = MASK_OP_ABS_OFF18(ctx->opcode);
3838 r1 = MASK_OP_ABS_S1D(ctx->opcode);
3839 temp = tcg_const_i32(EA_ABS_FORMAT(address));
3840 temp2 = tcg_temp_new();
3842 tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
3843 tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);
3845 tcg_temp_free(temp2);
3846 tcg_temp_free(temp);
3847 break;
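/* LD.Q loads a half-word and places it in the upper 16 bits of D[r1]. */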
3848 case OPC1_32_ABS_LD_Q:
3849 address = MASK_OP_ABS_OFF18(ctx->opcode);
3850 r1 = MASK_OP_ABS_S1D(ctx->opcode);
3851 temp = tcg_const_i32(EA_ABS_FORMAT(address));
3853 tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
3854 tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
3856 tcg_temp_free(temp);
3857 break;
3858 case OPC1_32_ABS_LEA:
3859 address = MASK_OP_ABS_OFF18(ctx->opcode);
3860 r1 = MASK_OP_ABS_S1D(ctx->opcode);
3861 tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
3862 break;
3863 /* ABSB-format */
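/* ST.T is a read-modify-write of a single bit: load the byte, clear
   bit bpos, OR in the constant b, and store the byte back. */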
3864 case OPC1_32_ABSB_ST_T:
3865 address = MASK_OP_ABS_OFF18(ctx->opcode);
3866 b = MASK_OP_ABSB_B(ctx->opcode);
3867 bpos = MASK_OP_ABSB_BPOS(ctx->opcode);
3869 temp = tcg_const_i32(EA_ABS_FORMAT(address));
3870 temp2 = tcg_temp_new();
3872 tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
3873 tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
3874 tcg_gen_ori_tl(temp2, temp2, (b << bpos));
3875 tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);
3877 tcg_temp_free(temp);
3878 tcg_temp_free(temp2);
3879 break;
3880 /* B-format */
3881 case OPC1_32_B_CALL:
3882 case OPC1_32_B_CALLA:
3883 case OPC1_32_B_J:
3884 case OPC1_32_B_JA:
3885 case OPC1_32_B_JL:
3886 case OPC1_32_B_JLA:
3887 address = MASK_OP_B_DISP24(ctx->opcode);
3888 gen_compute_branch(ctx, op1, 0, 0, 0, address);
3889 break;
3890 /* Bit-format */
3891 case OPCM_32_BIT_ANDACC:
3892 decode_bit_andacc(env, ctx);
3893 break;
3894 case OPCM_32_BIT_LOGICAL_T1:
3895 decode_bit_logical_t(env, ctx);
3896 break;
3897 case OPCM_32_BIT_INSERT:
3898 decode_bit_insert(env, ctx);
3899 break;
3900 case OPCM_32_BIT_LOGICAL_T2:
3901 decode_bit_logical_t2(env, ctx);
3902 break;
3903 case OPCM_32_BIT_ORAND:
3904 decode_bit_orand(env, ctx);
3905 break;
3906 case OPCM_32_BIT_SH_LOGIC1:
3907 decode_bit_sh_logic1(env, ctx);
3908 break;
3909 case OPCM_32_BIT_SH_LOGIC2:
3910 decode_bit_sh_logic2(env, ctx);
3911 break;
3912 /* BO Format */
3913 case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
3914 decode_bo_addrmode_post_pre_base(env, ctx);
3915 break;
3916 case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
3917 decode_bo_addrmode_bitreverse_circular(env, ctx);
3918 break;
3919 case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
3920 decode_bo_addrmode_ld_post_pre_base(env, ctx);
3921 break;
3922 case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
3923 decode_bo_addrmode_ld_bitreverse_circular(env, ctx);
3924 break;
3925 case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
3926 decode_bo_addrmode_stctx_post_pre_base(env, ctx);
3927 break;
3928 case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
3929 decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx);
3930 break;
3931 /* BOL-format */
3932 case OPC1_32_BOL_LD_A_LONGOFF:
3933 case OPC1_32_BOL_LD_W_LONFOFF:
3934 case OPC1_32_BOL_LEA_LONGOFF:
3935 case OPC1_32_BOL_ST_W_LONGOFF:
3936 case OPC1_32_BOL_ST_A_LONGOFF:
3937 decode_bol_opc(env, ctx, op1);
3938 break;
3939 /* BRC Format */
3940 case OPCM_32_BRC_EQ_NEQ:
3941 case OPCM_32_BRC_GE:
3942 case OPCM_32_BRC_JLT:
3943 case OPCM_32_BRC_JNE:
3944 const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
3945 address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
3946 r1 = MASK_OP_BRC_S1(ctx->opcode);
3947 gen_compute_branch(ctx, op1, r1, 0, const4, address);
3948 break;
3949 /* BRN Format */
3950 case OPCM_32_BRN_JTT:
3951 address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
3952 r1 = MASK_OP_BRN_S1(ctx->opcode);
3953 gen_compute_branch(ctx, op1, r1, 0, 0, address);
3954 break;
3955 /* BRR Format */
3956 case OPCM_32_BRR_EQ_NEQ:
3957 case OPCM_32_BRR_ADDR_EQ_NEQ:
3958 case OPCM_32_BRR_GE:
3959 case OPCM_32_BRR_JLT:
3960 case OPCM_32_BRR_JNE:
3961 case OPCM_32_BRR_JNZ:
3962 case OPCM_32_BRR_LOOP:
3963 address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
3964 r2 = MASK_OP_BRR_S2(ctx->opcode);
3965 r1 = MASK_OP_BRR_S1(ctx->opcode);
3966 gen_compute_branch(ctx, op1, r1, r2, 0, address);
3967 break;
3968 /* RC Format */
3969 case OPCM_32_RC_LOGICAL_SHIFT:
3970 decode_rc_logical_shift(env, ctx);
3971 break;
3972 case OPCM_32_RC_ACCUMULATOR:
3973 decode_rc_accumulator(env, ctx);
3974 break;
3975 case OPCM_32_RC_SERVICEROUTINE:
3976 decode_rc_serviceroutine(env, ctx);
3977 break;
3978 case OPCM_32_RC_MUL:
3979 decode_rc_mul(env, ctx);
3980 break;
3981 /* RCPW Format */
3982 case OPCM_32_RCPW_MASK_INSERT:
3983 decode_rcpw_insert(env, ctx);
3984 break;
3985 /* RCRR Format */
3986 case OPC1_32_RCRR_INSERT:
3987 r1 = MASK_OP_RCRR_S1(ctx->opcode);
3988 r2 = MASK_OP_RCRR_S3(ctx->opcode);
3989 r3 = MASK_OP_RCRR_D(ctx->opcode);
3990 const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
3991 temp = tcg_const_i32(const16);
3992 temp2 = tcg_temp_new(); /* width */
3993 temp3 = tcg_temp_new(); /* pos */
3995 tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
3996 tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);
3998 gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);
4000 tcg_temp_free(temp);
4001 tcg_temp_free(temp2);
4002 tcg_temp_free(temp3);
4003 break;
4004 /* RCRW Format */
4005 case OPCM_32_RCRW_MASK_INSERT:
4006 decode_rcrw_insert(env, ctx);
4007 break;
4008 /* RCR Format */
4009 case OPCM_32_RCR_COND_SELECT:
4010 decode_rcr_cond_select(env, ctx);
4011 break;
4012 case OPCM_32_RCR_MADD:
4013 decode_rcr_madd(env, ctx);
4014 break;
4015 case OPCM_32_RCR_MSUB:
4016 decode_rcr_msub(env, ctx);
4017 break;
4018 /* RLC Format */
4019 case OPC1_32_RLC_ADDI:
4020 case OPC1_32_RLC_ADDIH:
4021 case OPC1_32_RLC_ADDIH_A:
4022 case OPC1_32_RLC_MFCR:
4023 case OPC1_32_RLC_MOV:
4024 case OPC1_32_RLC_MOV_U:
4025 case OPC1_32_RLC_MOV_H:
4026 case OPC1_32_RLC_MOVH_A:
4027 case OPC1_32_RLC_MTCR:
4028 decode_rlc_opc(env, ctx, op1);
4029 break;
4030 }
4031 }
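/* decode_opc: bit 0 of the opcode word selects the instruction width
   (0 = 16-bit, 1 = 32-bit), and next_pc is advanced accordingly. */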
4033 static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
4034 {
4035 /* 16-Bit Instruction */
4036 if ((ctx->opcode & 0x1) == 0) {
4037 ctx->next_pc = ctx->pc + 2;
4038 decode_16Bit_opc(env, ctx);
4039 /* 32-Bit Instruction */
4040 } else {
4041 ctx->next_pc = ctx->pc + 4;
4042 decode_32Bit_opc(env, ctx);
4043 }
4044 }
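/* Main translation loop: decode guest instructions into a TCG opcode
   sequence for one translation block. */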
4046 static inline void
4047 gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
4048 int search_pc)
4049 {
4050 CPUState *cs = CPU(cpu);
4051 CPUTriCoreState *env = &cpu->env;
4052 DisasContext ctx;
4053 target_ulong pc_start;
4054 int num_insns;
4055 uint16_t *gen_opc_end;
4057 if (search_pc) {
4058 qemu_log("search pc %d\n", search_pc);
4059 }
4061 num_insns = 0;
4062 pc_start = tb->pc;
4063 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4064 ctx.pc = pc_start;
4065 ctx.saved_pc = -1;
4066 ctx.tb = tb;
4067 ctx.singlestep_enabled = cs->singlestep_enabled;
4068 ctx.bstate = BS_NONE;
4069 ctx.mem_idx = cpu_mmu_index(env);
4071 tcg_clear_temp_count();
4072 gen_tb_start();
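/* Translate one instruction per iteration until a branch or exception
   changes ctx.bstate, the opcode buffer is about to overflow, or
   single-stepping forces the block to end early. */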
4073 while (ctx.bstate == BS_NONE) {
4074 ctx.opcode = cpu_ldl_code(env, ctx.pc);
4075 decode_opc(env, &ctx, 0);
4077 num_insns++;
4079 if (tcg_ctx.gen_opc_ptr >= gen_opc_end) {
4080 gen_save_pc(ctx.next_pc);
4081 tcg_gen_exit_tb(0);
4082 break;
4083 }
4084 if (singlestep) {
4085 gen_save_pc(ctx.next_pc);
4086 tcg_gen_exit_tb(0);
4087 break;
4088 }
4089 ctx.pc = ctx.next_pc;
4090 }
4092 gen_tb_end(tb, num_insns);
4093 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4094 if (search_pc) {
4095 printf("done_generating search pc\n");
4096 } else {
4097 tb->size = ctx.pc - pc_start;
4098 tb->icount = num_insns;
4099 }
4100 if (tcg_check_temp_count()) {
4101 printf("LEAK at %08x\n", env->PC);
4102 }
4104 #ifdef DEBUG_DISAS
4105 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4106 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4107 log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
4108 qemu_log("\n");
4109 }
4110 #endif
4111 }
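/* Entry points called by the generic translator code; the _pc variant
   (search_pc) currently only changes the logging and skips filling in
   tb->size and tb->icount. */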
4113 void
4114 gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
4115 {
4116 gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, false);
4117 }
4119 void
4120 gen_intermediate_code_pc(CPUTriCoreState *env, struct TranslationBlock *tb)
4121 {
4122 gen_intermediate_code_internal(tricore_env_get_cpu(env), tb, true);
4123 }
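/* Restore the architectural PC after an exception from the value
   recorded for the given opcode index. */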
4125 void
4126 restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos)
4127 {
4128 env->PC = tcg_ctx.gen_opc_pc[pc_pos];
4129 }
4132 /* Initialization */
4136 void cpu_state_reset(CPUTriCoreState *env)
4137 {
4138 /* Reset Regs to Default Value */
4139 env->PSW = 0xb80;
4140 }
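/* Map the core special function registers (PCXI, PSW, PC, ICR) to TCG
   globals backed by fields of CPUTriCoreState. */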
4142 static void tricore_tcg_init_csfr(void)
4143 {
4144 cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
4145 offsetof(CPUTriCoreState, PCXI), "PCXI");
4146 cpu_PSW = tcg_global_mem_new(TCG_AREG0,
4147 offsetof(CPUTriCoreState, PSW), "PSW");
4148 cpu_PC = tcg_global_mem_new(TCG_AREG0,
4149 offsetof(CPUTriCoreState, PC), "PC");
4150 cpu_ICR = tcg_global_mem_new(TCG_AREG0,
4151 offsetof(CPUTriCoreState, ICR), "ICR");
4152 }
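/* One-time TCG setup: create globals for the address and data register
   files, the CSFRs, and the cached PSW status flags. */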
4154 void tricore_tcg_init(void)
4155 {
4156 int i;
4157 static int inited;
4158 if (inited) {
4159 return;
4160 }
4161 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
4162 /* reg init */
4163 for (i = 0 ; i < 16 ; i++) {
4164 cpu_gpr_a[i] = tcg_global_mem_new(TCG_AREG0,
4165 offsetof(CPUTriCoreState, gpr_a[i]),
4166 regnames_a[i]);
4167 }
4168 for (i = 0 ; i < 16 ; i++) {
4169 cpu_gpr_d[i] = tcg_global_mem_new(TCG_AREG0,
4170 offsetof(CPUTriCoreState, gpr_d[i]),
4171 regnames_d[i]);
4172 }
4173 tricore_tcg_init_csfr();
4174 /* init PSW flag cache */
4175 cpu_PSW_C = tcg_global_mem_new(TCG_AREG0,
4176 offsetof(CPUTriCoreState, PSW_USB_C),
4177 "PSW_C");
4178 cpu_PSW_V = tcg_global_mem_new(TCG_AREG0,
4179 offsetof(CPUTriCoreState, PSW_USB_V),
4180 "PSW_V");
4181 cpu_PSW_SV = tcg_global_mem_new(TCG_AREG0,
4182 offsetof(CPUTriCoreState, PSW_USB_SV),
4183 "PSW_SV");
4184 cpu_PSW_AV = tcg_global_mem_new(TCG_AREG0,
4185 offsetof(CPUTriCoreState, PSW_USB_AV),
4186 "PSW_AV");
4187 cpu_PSW_SAV = tcg_global_mem_new(TCG_AREG0,
4188 offsetof(CPUTriCoreState, PSW_USB_SAV),
4189 "PSW_SAV");