virtio-mem: Drop precopy notifier
[qemu/kevin.git] / target / rx / translate.c
blob5db8f79a82e443cb6bdaa977eb98fe072df5f6c6
1 /*
2 * RX translation
4 * Copyright (c) 2019 Yoshinori Sato
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/bswap.h"
21 #include "qemu/qemu-print.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
29 #include "exec/log.h"
31 typedef struct DisasContext {
32 DisasContextBase base;
33 CPURXState *env;
34 uint32_t pc;
35 } DisasContext;
37 typedef struct DisasCompare {
38 TCGv value;
39 TCGv temp;
40 TCGCond cond;
41 } DisasCompare;
/*
 * Return the name of control register @cr for diagnostics.
 * Reserved slots yield "", out-of-range values yield "illegal".
 */
const char *rx_crname(uint8_t cr)
{
    static const char *cr_names[] = {
        "psw", "pc", "usp", "fpsw", "", "", "", "",
        "bpsw", "bpc", "isp", "fintv", "intb", "", "", ""
    };
    if (cr >= sizeof(cr_names) / sizeof(cr_names[0])) {
        return "illegal";
    }
    return cr_names[cr];
}
55 /* Target-specific values for dc->base.is_jmp. */
56 #define DISAS_JUMP DISAS_TARGET_0
57 #define DISAS_UPDATE DISAS_TARGET_1
58 #define DISAS_EXIT DISAS_TARGET_2
60 /* global register indexes */
61 static TCGv cpu_regs[16];
62 static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
63 static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
64 static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
65 static TCGv cpu_fintv, cpu_intb, cpu_pc;
66 static TCGv_i64 cpu_acc;
68 #define cpu_sp cpu_regs[0]
70 #include "exec/gen-icount.h"
72 /* decoder helper */
73 static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
74 int i, int n)
76 while (++i <= n) {
77 uint8_t b = cpu_ldub_code(ctx->env, ctx->base.pc_next++);
78 insn |= b << (32 - i * 8);
80 return insn;
83 static uint32_t li(DisasContext *ctx, int sz)
85 int32_t tmp, addr;
86 CPURXState *env = ctx->env;
87 addr = ctx->base.pc_next;
89 tcg_debug_assert(sz < 4);
90 switch (sz) {
91 case 1:
92 ctx->base.pc_next += 1;
93 return cpu_ldsb_code(env, addr);
94 case 2:
95 ctx->base.pc_next += 2;
96 return cpu_ldsw_code(env, addr);
97 case 3:
98 ctx->base.pc_next += 3;
99 tmp = cpu_ldsb_code(env, addr + 2) << 16;
100 tmp |= cpu_lduw_code(env, addr) & 0xffff;
101 return tmp;
102 case 0:
103 ctx->base.pc_next += 4;
104 return cpu_ldl_code(env, addr);
106 return 0;
109 static int bdsp_s(DisasContext *ctx, int d)
112 * 0 -> 8
113 * 1 -> 9
114 * 2 -> 10
115 * 3 -> 3
117 * 7 -> 7
119 if (d < 3) {
120 d += 8;
122 return d;
125 /* Include the auto-generated decoder. */
126 #include "decode-insns.c.inc"
128 void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
130 RXCPU *cpu = RX_CPU(cs);
131 CPURXState *env = &cpu->env;
132 int i;
133 uint32_t psw;
135 psw = rx_cpu_pack_psw(env);
136 qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n",
137 env->pc, psw);
138 for (i = 0; i < 16; i += 4) {
139 qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
140 i, env->regs[i], i + 1, env->regs[i + 1],
141 i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]);
145 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
147 if (translator_use_goto_tb(&dc->base, dest)) {
148 tcg_gen_goto_tb(n);
149 tcg_gen_movi_i32(cpu_pc, dest);
150 tcg_gen_exit_tb(dc->base.tb, n);
151 } else {
152 tcg_gen_movi_i32(cpu_pc, dest);
153 tcg_gen_lookup_and_goto_ptr();
155 dc->base.is_jmp = DISAS_NORETURN;
158 /* generic load wrapper */
159 static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
161 tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
164 /* unsigned load wrapper */
165 static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
167 tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
170 /* generic store wrapper */
171 static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
173 tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
176 /* [ri, rb] */
177 static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
178 int size, int ri, int rb)
180 tcg_gen_shli_i32(mem, cpu_regs[ri], size);
181 tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
184 /* dsp[reg] */
185 static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
186 int ld, int size, int reg)
188 uint32_t dsp;
190 tcg_debug_assert(ld < 3);
191 switch (ld) {
192 case 0:
193 return cpu_regs[reg];
194 case 1:
195 dsp = cpu_ldub_code(ctx->env, ctx->base.pc_next) << size;
196 tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
197 ctx->base.pc_next += 1;
198 return mem;
199 case 2:
200 dsp = cpu_lduw_code(ctx->env, ctx->base.pc_next) << size;
201 tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
202 ctx->base.pc_next += 2;
203 return mem;
205 return NULL;
208 static inline MemOp mi_to_mop(unsigned mi)
210 static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB };
211 tcg_debug_assert(mi < 5);
212 return mop[mi];
215 /* load source operand */
216 static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
217 int ld, int mi, int rs)
219 TCGv addr;
220 MemOp mop;
221 if (ld < 3) {
222 mop = mi_to_mop(mi);
223 addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
224 tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
225 return mem;
226 } else {
227 return cpu_regs[rs];
231 /* Processor mode check */
232 static int is_privileged(DisasContext *ctx, int is_exception)
234 if (FIELD_EX32(ctx->base.tb->flags, PSW, PM)) {
235 if (is_exception) {
236 gen_helper_raise_privilege_violation(cpu_env);
238 return 0;
239 } else {
240 return 1;
244 /* generate QEMU condition */
245 static void psw_cond(DisasCompare *dc, uint32_t cond)
247 tcg_debug_assert(cond < 16);
248 switch (cond) {
249 case 0: /* z */
250 dc->cond = TCG_COND_EQ;
251 dc->value = cpu_psw_z;
252 break;
253 case 1: /* nz */
254 dc->cond = TCG_COND_NE;
255 dc->value = cpu_psw_z;
256 break;
257 case 2: /* c */
258 dc->cond = TCG_COND_NE;
259 dc->value = cpu_psw_c;
260 break;
261 case 3: /* nc */
262 dc->cond = TCG_COND_EQ;
263 dc->value = cpu_psw_c;
264 break;
265 case 4: /* gtu (C& ~Z) == 1 */
266 case 5: /* leu (C& ~Z) == 0 */
267 tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0);
268 tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c);
269 dc->cond = (cond == 4) ? TCG_COND_NE : TCG_COND_EQ;
270 dc->value = dc->temp;
271 break;
272 case 6: /* pz (S == 0) */
273 dc->cond = TCG_COND_GE;
274 dc->value = cpu_psw_s;
275 break;
276 case 7: /* n (S == 1) */
277 dc->cond = TCG_COND_LT;
278 dc->value = cpu_psw_s;
279 break;
280 case 8: /* ge (S^O)==0 */
281 case 9: /* lt (S^O)==1 */
282 tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
283 dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT;
284 dc->value = dc->temp;
285 break;
286 case 10: /* gt ((S^O)|Z)==0 */
287 case 11: /* le ((S^O)|Z)==1 */
288 tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
289 tcg_gen_sari_i32(dc->temp, dc->temp, 31);
290 tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp);
291 dc->cond = (cond == 10) ? TCG_COND_NE : TCG_COND_EQ;
292 dc->value = dc->temp;
293 break;
294 case 12: /* o */
295 dc->cond = TCG_COND_LT;
296 dc->value = cpu_psw_o;
297 break;
298 case 13: /* no */
299 dc->cond = TCG_COND_GE;
300 dc->value = cpu_psw_o;
301 break;
302 case 14: /* always true */
303 dc->cond = TCG_COND_ALWAYS;
304 dc->value = dc->temp;
305 break;
306 case 15: /* always false */
307 dc->cond = TCG_COND_NEVER;
308 dc->value = dc->temp;
309 break;
313 static void move_from_cr(TCGv ret, int cr, uint32_t pc)
315 TCGv z = tcg_const_i32(0);
316 switch (cr) {
317 case 0: /* PSW */
318 gen_helper_pack_psw(ret, cpu_env);
319 break;
320 case 1: /* PC */
321 tcg_gen_movi_i32(ret, pc);
322 break;
323 case 2: /* USP */
324 tcg_gen_movcond_i32(TCG_COND_NE, ret,
325 cpu_psw_u, z, cpu_sp, cpu_usp);
326 break;
327 case 3: /* FPSW */
328 tcg_gen_mov_i32(ret, cpu_fpsw);
329 break;
330 case 8: /* BPSW */
331 tcg_gen_mov_i32(ret, cpu_bpsw);
332 break;
333 case 9: /* BPC */
334 tcg_gen_mov_i32(ret, cpu_bpc);
335 break;
336 case 10: /* ISP */
337 tcg_gen_movcond_i32(TCG_COND_EQ, ret,
338 cpu_psw_u, z, cpu_sp, cpu_isp);
339 break;
340 case 11: /* FINTV */
341 tcg_gen_mov_i32(ret, cpu_fintv);
342 break;
343 case 12: /* INTB */
344 tcg_gen_mov_i32(ret, cpu_intb);
345 break;
346 default:
347 qemu_log_mask(LOG_GUEST_ERROR, "Unimplement control register %d", cr);
348 /* Unimplement registers return 0 */
349 tcg_gen_movi_i32(ret, 0);
350 break;
352 tcg_temp_free(z);
355 static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
357 TCGv z;
358 if (cr >= 8 && !is_privileged(ctx, 0)) {
359 /* Some control registers can only be written in privileged mode. */
360 qemu_log_mask(LOG_GUEST_ERROR,
361 "disallow control register write %s", rx_crname(cr));
362 return;
364 z = tcg_const_i32(0);
365 switch (cr) {
366 case 0: /* PSW */
367 gen_helper_set_psw(cpu_env, val);
368 break;
369 /* case 1: to PC not supported */
370 case 2: /* USP */
371 tcg_gen_mov_i32(cpu_usp, val);
372 tcg_gen_movcond_i32(TCG_COND_NE, cpu_sp,
373 cpu_psw_u, z, cpu_usp, cpu_sp);
374 break;
375 case 3: /* FPSW */
376 gen_helper_set_fpsw(cpu_env, val);
377 break;
378 case 8: /* BPSW */
379 tcg_gen_mov_i32(cpu_bpsw, val);
380 break;
381 case 9: /* BPC */
382 tcg_gen_mov_i32(cpu_bpc, val);
383 break;
384 case 10: /* ISP */
385 tcg_gen_mov_i32(cpu_isp, val);
386 /* if PSW.U is 0, copy isp to r0 */
387 tcg_gen_movcond_i32(TCG_COND_EQ, cpu_sp,
388 cpu_psw_u, z, cpu_isp, cpu_sp);
389 break;
390 case 11: /* FINTV */
391 tcg_gen_mov_i32(cpu_fintv, val);
392 break;
393 case 12: /* INTB */
394 tcg_gen_mov_i32(cpu_intb, val);
395 break;
396 default:
397 qemu_log_mask(LOG_GUEST_ERROR,
398 "Unimplement control register %d", cr);
399 break;
401 tcg_temp_free(z);
404 static void push(TCGv val)
406 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
407 rx_gen_st(MO_32, val, cpu_sp);
410 static void pop(TCGv ret)
412 rx_gen_ld(MO_32, ret, cpu_sp);
413 tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
416 /* mov.<bwl> rs,dsp5[rd] */
417 static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
419 TCGv mem;
420 mem = tcg_temp_new();
421 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
422 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
423 tcg_temp_free(mem);
424 return true;
427 /* mov.<bwl> dsp5[rs],rd */
428 static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
430 TCGv mem;
431 mem = tcg_temp_new();
432 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
433 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
434 tcg_temp_free(mem);
435 return true;
438 /* mov.l #uimm4,rd */
439 /* mov.l #uimm8,rd */
440 /* mov.l #imm,rd */
441 static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
443 tcg_gen_movi_i32(cpu_regs[a->rd], a->imm);
444 return true;
447 /* mov.<bwl> #uimm8,dsp[rd] */
448 /* mov.<bwl> #imm, dsp[rd] */
449 static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
451 TCGv imm, mem;
452 imm = tcg_const_i32(a->imm);
453 mem = tcg_temp_new();
454 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
455 rx_gen_st(a->sz, imm, mem);
456 tcg_temp_free(imm);
457 tcg_temp_free(mem);
458 return true;
461 /* mov.<bwl> [ri,rb],rd */
462 static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
464 TCGv mem;
465 mem = tcg_temp_new();
466 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
467 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
468 tcg_temp_free(mem);
469 return true;
472 /* mov.<bwl> rd,[ri,rb] */
473 static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
475 TCGv mem;
476 mem = tcg_temp_new();
477 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
478 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
479 tcg_temp_free(mem);
480 return true;
483 /* mov.<bwl> dsp[rs],dsp[rd] */
484 /* mov.<bwl> rs,dsp[rd] */
485 /* mov.<bwl> dsp[rs],rd */
486 /* mov.<bwl> rs,rd */
487 static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
489 static void (* const mov[])(TCGv ret, TCGv arg) = {
490 tcg_gen_ext8s_i32, tcg_gen_ext16s_i32, tcg_gen_mov_i32,
492 TCGv tmp, mem, addr;
493 if (a->lds == 3 && a->ldd == 3) {
494 /* mov.<bwl> rs,rd */
495 mov[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
496 return true;
499 mem = tcg_temp_new();
500 if (a->lds == 3) {
501 /* mov.<bwl> rs,dsp[rd] */
502 addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
503 rx_gen_st(a->sz, cpu_regs[a->rd], addr);
504 } else if (a->ldd == 3) {
505 /* mov.<bwl> dsp[rs],rd */
506 addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
507 rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
508 } else {
509 /* mov.<bwl> dsp[rs],dsp[rd] */
510 tmp = tcg_temp_new();
511 addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
512 rx_gen_ld(a->sz, tmp, addr);
513 addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
514 rx_gen_st(a->sz, tmp, addr);
515 tcg_temp_free(tmp);
517 tcg_temp_free(mem);
518 return true;
521 /* mov.<bwl> rs,[rd+] */
522 /* mov.<bwl> rs,[-rd] */
523 static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
525 TCGv val;
526 val = tcg_temp_new();
527 tcg_gen_mov_i32(val, cpu_regs[a->rs]);
528 if (a->ad == 1) {
529 tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
531 rx_gen_st(a->sz, val, cpu_regs[a->rd]);
532 if (a->ad == 0) {
533 tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
535 tcg_temp_free(val);
536 return true;
539 /* mov.<bwl> [rd+],rs */
540 /* mov.<bwl> [-rd],rs */
541 static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
543 TCGv val;
544 val = tcg_temp_new();
545 if (a->ad == 1) {
546 tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
548 rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
549 if (a->ad == 0) {
550 tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
552 tcg_gen_mov_i32(cpu_regs[a->rs], val);
553 tcg_temp_free(val);
554 return true;
557 /* movu.<bw> dsp5[rs],rd */
558 /* movu.<bw> dsp[rs],rd */
559 static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
561 TCGv mem;
562 mem = tcg_temp_new();
563 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
564 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
565 tcg_temp_free(mem);
566 return true;
569 /* movu.<bw> rs,rd */
570 static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
572 static void (* const ext[])(TCGv ret, TCGv arg) = {
573 tcg_gen_ext8u_i32, tcg_gen_ext16u_i32,
575 ext[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
576 return true;
579 /* movu.<bw> [ri,rb],rd */
580 static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
582 TCGv mem;
583 mem = tcg_temp_new();
584 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
585 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
586 tcg_temp_free(mem);
587 return true;
590 /* movu.<bw> [rd+],rs */
591 /* mov.<bw> [-rd],rs */
592 static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
594 TCGv val;
595 val = tcg_temp_new();
596 if (a->ad == 1) {
597 tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
599 rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
600 if (a->ad == 0) {
601 tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
603 tcg_gen_mov_i32(cpu_regs[a->rs], val);
604 tcg_temp_free(val);
605 return true;
609 /* pop rd */
610 static bool trans_POP(DisasContext *ctx, arg_POP *a)
612 /* mov.l [r0+], rd */
613 arg_MOV_rp mov_a;
614 mov_a.rd = 0;
615 mov_a.rs = a->rd;
616 mov_a.ad = 0;
617 mov_a.sz = MO_32;
618 trans_MOV_pr(ctx, &mov_a);
619 return true;
622 /* popc cr */
623 static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
625 TCGv val;
626 val = tcg_temp_new();
627 pop(val);
628 move_to_cr(ctx, val, a->cr);
629 if (a->cr == 0 && is_privileged(ctx, 0)) {
630 /* PSW.I may be updated here. exit TB. */
631 ctx->base.is_jmp = DISAS_UPDATE;
633 tcg_temp_free(val);
634 return true;
637 /* popm rd-rd2 */
638 static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
640 int r;
641 if (a->rd == 0 || a->rd >= a->rd2) {
642 qemu_log_mask(LOG_GUEST_ERROR,
643 "Invalid register ranges r%d-r%d", a->rd, a->rd2);
645 r = a->rd;
646 while (r <= a->rd2 && r < 16) {
647 pop(cpu_regs[r++]);
649 return true;
653 /* push.<bwl> rs */
654 static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
656 TCGv val;
657 val = tcg_temp_new();
658 tcg_gen_mov_i32(val, cpu_regs[a->rs]);
659 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
660 rx_gen_st(a->sz, val, cpu_sp);
661 tcg_temp_free(val);
662 return true;
665 /* push.<bwl> dsp[rs] */
666 static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
668 TCGv mem, val, addr;
669 mem = tcg_temp_new();
670 val = tcg_temp_new();
671 addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
672 rx_gen_ld(a->sz, val, addr);
673 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
674 rx_gen_st(a->sz, val, cpu_sp);
675 tcg_temp_free(mem);
676 tcg_temp_free(val);
677 return true;
680 /* pushc rx */
681 static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
683 TCGv val;
684 val = tcg_temp_new();
685 move_from_cr(val, a->cr, ctx->pc);
686 push(val);
687 tcg_temp_free(val);
688 return true;
691 /* pushm rs-rs2 */
692 static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
694 int r;
696 if (a->rs == 0 || a->rs >= a->rs2) {
697 qemu_log_mask(LOG_GUEST_ERROR,
698 "Invalid register ranges r%d-r%d", a->rs, a->rs2);
700 r = a->rs2;
701 while (r >= a->rs && r >= 0) {
702 push(cpu_regs[r--]);
704 return true;
707 /* xchg rs,rd */
708 static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
710 TCGv tmp;
711 tmp = tcg_temp_new();
712 tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
713 tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
714 tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
715 tcg_temp_free(tmp);
716 return true;
719 /* xchg dsp[rs].<mi>,rd */
720 static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
722 TCGv mem, addr;
723 mem = tcg_temp_new();
724 switch (a->mi) {
725 case 0: /* dsp[rs].b */
726 case 1: /* dsp[rs].w */
727 case 2: /* dsp[rs].l */
728 addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs);
729 break;
730 case 3: /* dsp[rs].uw */
731 case 4: /* dsp[rs].ub */
732 addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs);
733 break;
734 default:
735 g_assert_not_reached();
737 tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd],
738 0, mi_to_mop(a->mi));
739 tcg_temp_free(mem);
740 return true;
743 static inline void stcond(TCGCond cond, int rd, int imm)
745 TCGv z;
746 TCGv _imm;
747 z = tcg_const_i32(0);
748 _imm = tcg_const_i32(imm);
749 tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
750 _imm, cpu_regs[rd]);
751 tcg_temp_free(z);
752 tcg_temp_free(_imm);
755 /* stz #imm,rd */
756 static bool trans_STZ(DisasContext *ctx, arg_STZ *a)
758 stcond(TCG_COND_EQ, a->rd, a->imm);
759 return true;
762 /* stnz #imm,rd */
763 static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
765 stcond(TCG_COND_NE, a->rd, a->imm);
766 return true;
769 /* sccnd.<bwl> rd */
770 /* sccnd.<bwl> dsp:[rd] */
771 static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
773 DisasCompare dc;
774 TCGv val, mem, addr;
775 dc.temp = tcg_temp_new();
776 psw_cond(&dc, a->cd);
777 if (a->ld < 3) {
778 val = tcg_temp_new();
779 mem = tcg_temp_new();
780 tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
781 addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd);
782 rx_gen_st(a->sz, val, addr);
783 tcg_temp_free(val);
784 tcg_temp_free(mem);
785 } else {
786 tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
788 tcg_temp_free(dc.temp);
789 return true;
792 /* rtsd #imm */
793 static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
795 tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
796 pop(cpu_pc);
797 ctx->base.is_jmp = DISAS_JUMP;
798 return true;
801 /* rtsd #imm, rd-rd2 */
802 static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
804 int dst;
805 int adj;
807 if (a->rd2 >= a->rd) {
808 adj = a->imm - (a->rd2 - a->rd + 1);
809 } else {
810 adj = a->imm - (15 - a->rd + 1);
813 tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
814 dst = a->rd;
815 while (dst <= a->rd2 && dst < 16) {
816 pop(cpu_regs[dst++]);
818 pop(cpu_pc);
819 ctx->base.is_jmp = DISAS_JUMP;
820 return true;
823 typedef void (*op2fn)(TCGv ret, TCGv arg1);
824 typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);
826 static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
828 opr(cpu_regs[dst], cpu_regs[src]);
831 static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
833 opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
836 static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
838 TCGv imm = tcg_const_i32(src2);
839 opr(cpu_regs[dst], cpu_regs[src], imm);
840 tcg_temp_free(imm);
843 static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
844 int dst, int src, int ld, int mi)
846 TCGv val, mem;
847 mem = tcg_temp_new();
848 val = rx_load_source(ctx, mem, ld, mi, src);
849 opr(cpu_regs[dst], cpu_regs[dst], val);
850 tcg_temp_free(mem);
853 static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
855 tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
856 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
857 tcg_gen_mov_i32(ret, cpu_psw_s);
860 /* and #uimm:4, rd */
861 /* and #imm, rd */
862 static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
864 rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
865 return true;
868 /* and dsp[rs], rd */
869 /* and rs,rd */
870 static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
872 rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
873 return true;
876 /* and rs,rs2,rd */
877 static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
879 rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
880 return true;
883 static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
885 tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
886 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
887 tcg_gen_mov_i32(ret, cpu_psw_s);
890 /* or #uimm:4, rd */
891 /* or #imm, rd */
892 static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
894 rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
895 return true;
898 /* or dsp[rs], rd */
899 /* or rs,rd */
900 static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
902 rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
903 return true;
906 /* or rs,rs2,rd */
907 static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
909 rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
910 return true;
913 static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
915 tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
916 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
917 tcg_gen_mov_i32(ret, cpu_psw_s);
920 /* xor #imm, rd */
921 static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
923 rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
924 return true;
927 /* xor dsp[rs], rd */
928 /* xor rs,rd */
929 static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
931 rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
932 return true;
935 static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
937 tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
938 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
941 /* tst #imm, rd */
942 static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
944 rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
945 return true;
948 /* tst dsp[rs], rd */
949 /* tst rs, rd */
950 static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
952 rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
953 return true;
956 static void rx_not(TCGv ret, TCGv arg1)
958 tcg_gen_not_i32(ret, arg1);
959 tcg_gen_mov_i32(cpu_psw_z, ret);
960 tcg_gen_mov_i32(cpu_psw_s, ret);
963 /* not rd */
964 /* not rs, rd */
965 static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
967 rx_gen_op_rr(rx_not, a->rd, a->rs);
968 return true;
971 static void rx_neg(TCGv ret, TCGv arg1)
973 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
974 tcg_gen_neg_i32(ret, arg1);
975 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
976 tcg_gen_mov_i32(cpu_psw_z, ret);
977 tcg_gen_mov_i32(cpu_psw_s, ret);
981 /* neg rd */
982 /* neg rs, rd */
983 static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
985 rx_gen_op_rr(rx_neg, a->rd, a->rs);
986 return true;
989 /* ret = arg1 + arg2 + psw_c */
990 static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
992 TCGv z;
993 z = tcg_const_i32(0);
994 tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
995 tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
996 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
997 tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
998 tcg_gen_xor_i32(z, arg1, arg2);
999 tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
1000 tcg_gen_mov_i32(ret, cpu_psw_s);
1001 tcg_temp_free(z);
1004 /* adc #imm, rd */
1005 static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
1007 rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
1008 return true;
1011 /* adc rs, rd */
1012 static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
1014 rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
1015 return true;
1018 /* adc dsp[rs], rd */
1019 static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
1021 /* mi only 2 */
1022 if (a->mi != 2) {
1023 return false;
1025 rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
1026 return true;
1029 /* ret = arg1 + arg2 */
1030 static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
1032 TCGv z;
1033 z = tcg_const_i32(0);
1034 tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
1035 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
1036 tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
1037 tcg_gen_xor_i32(z, arg1, arg2);
1038 tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
1039 tcg_gen_mov_i32(ret, cpu_psw_s);
1040 tcg_temp_free(z);
1043 /* add #uimm4, rd */
1044 /* add #imm, rs, rd */
1045 static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
1047 rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
1048 return true;
1051 /* add rs, rd */
1052 /* add dsp[rs], rd */
1053 static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
1055 rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
1056 return true;
1059 /* add rs, rs2, rd */
1060 static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
1062 rx_gen_op_rrr(rx_add, a->rd, a->rs, a->rs2);
1063 return true;
1066 /* ret = arg1 - arg2 */
1067 static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
1069 TCGv temp;
1070 tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
1071 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
1072 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
1073 tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
1074 temp = tcg_temp_new_i32();
1075 tcg_gen_xor_i32(temp, arg1, arg2);
1076 tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp);
1077 tcg_temp_free_i32(temp);
1078 /* CMP not required return */
1079 if (ret) {
1080 tcg_gen_mov_i32(ret, cpu_psw_s);
1083 static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
1085 rx_sub(NULL, arg1, arg2);
1087 /* ret = arg1 - arg2 - !psw_c */
1088 /* -> ret = arg1 + ~arg2 + psw_c */
1089 static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
1091 TCGv temp;
1092 temp = tcg_temp_new();
1093 tcg_gen_not_i32(temp, arg2);
1094 rx_adc(ret, arg1, temp);
1095 tcg_temp_free(temp);
1098 /* cmp #imm4, rs2 */
1099 /* cmp #imm8, rs2 */
1100 /* cmp #imm, rs2 */
1101 static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
1103 rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
1104 return true;
1107 /* cmp rs, rs2 */
1108 /* cmp dsp[rs], rs2 */
1109 static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
1111 rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
1112 return true;
1115 /* sub #imm4, rd */
1116 static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
1118 rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
1119 return true;
1122 /* sub rs, rd */
1123 /* sub dsp[rs], rd */
1124 static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
1126 rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
1127 return true;
1130 /* sub rs2, rs, rd */
1131 static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
1133 rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs);
1134 return true;
1137 /* sbb rs, rd */
1138 static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a)
1140 rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs);
1141 return true;
1144 /* sbb dsp[rs], rd */
1145 static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
1147 /* mi only 2 */
1148 if (a->mi != 2) {
1149 return false;
1151 rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi);
1152 return true;
1155 static void rx_abs(TCGv ret, TCGv arg1)
1157 TCGv neg;
1158 TCGv zero;
1159 neg = tcg_temp_new();
1160 zero = tcg_const_i32(0);
1161 tcg_gen_neg_i32(neg, arg1);
1162 tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1);
1163 tcg_temp_free(neg);
1164 tcg_temp_free(zero);
1167 /* abs rd */
1168 /* abs rs, rd */
1169 static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
1171 rx_gen_op_rr(rx_abs, a->rd, a->rs);
1172 return true;
1175 /* max #imm, rd */
1176 static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
1178 rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm);
1179 return true;
1182 /* max rs, rd */
1183 /* max dsp[rs], rd */
1184 static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
1186 rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi);
1187 return true;
1190 /* min #imm, rd */
1191 static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
1193 rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm);
1194 return true;
1197 /* min rs, rd */
1198 /* min dsp[rs], rd */
1199 static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
1201 rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi);
1202 return true;
1205 /* mul #uimm4, rd */
1206 /* mul #imm, rd */
1207 static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
1209 rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm);
1210 return true;
1213 /* mul rs, rd */
1214 /* mul dsp[rs], rd */
1215 static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
1217 rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi);
1218 return true;
1221 /* mul rs, rs2, rd */
1222 static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
1224 rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2);
1225 return true;
1228 /* emul #imm, rd */
1229 static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
1231 TCGv imm = tcg_const_i32(a->imm);
1232 if (a->rd > 14) {
1233 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1235 tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1236 cpu_regs[a->rd], imm);
1237 tcg_temp_free(imm);
1238 return true;
1241 /* emul rs, rd */
1242 /* emul dsp[rs], rd */
1243 static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
1245 TCGv val, mem;
1246 if (a->rd > 14) {
1247 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1249 mem = tcg_temp_new();
1250 val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
1251 tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1252 cpu_regs[a->rd], val);
1253 tcg_temp_free(mem);
1254 return true;
1257 /* emulu #imm, rd */
1258 static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
1260 TCGv imm = tcg_const_i32(a->imm);
1261 if (a->rd > 14) {
1262 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1264 tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1265 cpu_regs[a->rd], imm);
1266 tcg_temp_free(imm);
1267 return true;
1270 /* emulu rs, rd */
1271 /* emulu dsp[rs], rd */
1272 static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
1274 TCGv val, mem;
1275 if (a->rd > 14) {
1276 qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
1278 mem = tcg_temp_new();
1279 val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
1280 tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
1281 cpu_regs[a->rd], val);
1282 tcg_temp_free(mem);
1283 return true;
1286 static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
1288 gen_helper_div(ret, cpu_env, arg1, arg2);
1291 static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
1293 gen_helper_divu(ret, cpu_env, arg1, arg2);
/* div #imm, rd */
static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
{
    /* rd = rd / imm (signed, via helper) */
    rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm);
    return true;
}

/* div rs, rd */
/* div dsp[rs], rd */
static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
{
    rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* divu #imm, rd */
static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
{
    /* rd = rd / imm (unsigned, via helper) */
    rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm);
    return true;
}

/* divu rs, rd */
/* divu dsp[rs], rd */
static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
{
    rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1327 /* shll #imm:5, rd */
1328 /* shll #imm:5, rs2, rd */
1329 static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
1331 TCGv tmp;
1332 tmp = tcg_temp_new();
1333 if (a->imm) {
1334 tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
1335 tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
1336 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
1337 tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
1338 tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
1339 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
1340 } else {
1341 tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]);
1342 tcg_gen_movi_i32(cpu_psw_c, 0);
1343 tcg_gen_movi_i32(cpu_psw_o, 0);
1345 tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
1346 tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
1347 return true;
/* shll rs, rd */
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
    TCGLabel *noshift, *done;
    TCGv count, tmp;

    noshift = gen_new_label();
    done = gen_new_label();
    /* if (cpu_regs[a->rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
    count = tcg_const_i32(32);
    tmp = tcg_temp_new();
    /* effective shift count is rs modulo 32 */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
    /* count = 32 - (rs & 31): right shift by this exposes, sign
       extended, every bit the left shift below will push out */
    tcg_gen_sub_i32(count, count, tmp);
    tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
    tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    /* psw_o <- shifted-out bits all-zero or all-one (same scheme as
       trans_SHLL_irr) */
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
    tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
    /* C = any shifted-out bit set */
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    /* } */
    gen_set_label(done);
    /* lazy Z/S: flag registers cache the result value */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    tcg_temp_free(count);
    tcg_temp_free(tmp);
    return true;
}
/* Common body for SHLR (alith == 0, logical) and SHAR (alith == 1,
 * arithmetic) with an immediate count: shift by imm - 1 first so the
 * final bit to leave the register can be captured into C, then shift
 * the last position. */
static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
                              unsigned int alith)
{
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    tcg_debug_assert(alith < 2);
    if (imm) {
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1);
        /* C = last bit shifted out */
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    } else {
        /* count 0: plain move, C cleared */
        tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
    }
    /* right shifts never overflow; lazy Z/S cache the result */
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
/* Common body for SHLR/SHAR with a register count (count taken modulo
 * 32; a zero count only clears C).  Same "shift by count - 1, grab C,
 * shift once more" scheme as shiftr_imm. */
static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
    TCGLabel *noshift, *done;
    TCGv count;
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
        tcg_gen_shr_i32, tcg_gen_sar_i32,
    };
    tcg_debug_assert(alith < 2);
    noshift = gen_new_label();
    done = gen_new_label();
    count = tcg_temp_new();
    /* if (cpu_regs[rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
    tcg_gen_andi_i32(count, cpu_regs[rs], 31);
    tcg_gen_subi_i32(count, count, 1);
    gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count);
    /* C = last bit shifted out */
    tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
    gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    /* } */
    gen_set_label(done);
    /* right shifts never overflow; lazy Z/S cache the result */
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
    tcg_temp_free(count);
}
/* shar #imm:5, rd */
/* shar #imm:5, rs2, rd */
static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 1);   /* 1 = arithmetic */
    return true;
}

/* shar rs, rd */
static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 1);            /* 1 = arithmetic */
    return true;
}

/* shlr #imm:5, rd */
/* shlr #imm:5, rs2, rd */
static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 0);   /* 0 = logical */
    return true;
}

/* shlr rs, rd */
static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 0);            /* 0 = logical */
    return true;
}
/* rolc rd: rotate left one bit through carry */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* save outgoing bit 31 before shifting */
    tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
    tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    /* old carry enters at bit 0 */
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    /* lazy Z/S cache the result */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    tcg_temp_free(tmp);
    return true;
}
1482 /* rorc rd */
1483 static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
1485 TCGv tmp;
1486 tmp = tcg_temp_new();
1487 tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
1488 tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
1489 tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
1490 tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
1491 tcg_gen_mov_i32(cpu_psw_c, tmp);
1492 tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
1493 tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
1494 return true;
enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};
/* Common body for ROTL/ROTR: rotate rd by an immediate (ir == ROT_IMM,
 * src is the count) or by a register (src is the register number).
 * C receives the bit that wrapped around; lazy Z/S cache the result. */
static inline void rx_rot(int ir, int dir, int rd, int src)
{
    switch (dir) {
    case ROTL:
        if (ir == ROT_IMM) {
            tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        /* the wrapped bit is now bit 0 */
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        /* the wrapped bit is now bit 31 */
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
/* rotl #imm, rd */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}

/* rotl rs, rd */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}

/* rotr #imm, rd */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}

/* rotr rs, rd */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}
/* revl rs, rd: full 32-bit byte swap (no PSW flags written) */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}
1558 /* revw rs, rd */
1559 static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
1561 TCGv tmp;
1562 tmp = tcg_temp_new();
1563 tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
1564 tcg_gen_shli_i32(tmp, tmp, 8);
1565 tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
1566 tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
1567 tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
1568 tcg_temp_free(tmp);
1569 return true;
/* conditional branch helper: branch to ctx->pc + dst (dst is relative
 * to the start of the branch insn) when condition code cd holds. */
static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
{
    DisasCompare dc;
    TCGLabel *t, *done;

    switch (cd) {
    case 0 ... 13:
        /* real condition: evaluate it from the lazy PSW flags, then
           chain to one of two TBs (taken / fall through) */
        dc.temp = tcg_temp_new();
        psw_cond(&dc, cd);
        t = gen_new_label();
        done = gen_new_label();
        tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
        gen_goto_tb(ctx, 0, ctx->base.pc_next);    /* not taken */
        tcg_gen_br(done);
        gen_set_label(t);
        gen_goto_tb(ctx, 1, ctx->pc + dst);        /* taken */
        gen_set_label(done);
        tcg_temp_free(dc.temp);
        break;
    case 14:
        /* always true case */
        gen_goto_tb(ctx, 0, ctx->pc + dst);
        break;
    case 15:
        /* always false case */
        /* Nothing to do */
        break;
    }
}
/* beq dsp:3 / bne dsp:3 */
/* beq dsp:8 / bne dsp:8 */
/* bc dsp:8 / bnc dsp:8 */
/* bgtu dsp:8 / bleu dsp:8 */
/* bpz dsp:8 / bn dsp:8 */
/* bge dsp:8 / blt dsp:8 */
/* bgt dsp:8 / ble dsp:8 */
/* bo dsp:8 / bno dsp:8 */
/* beq dsp:16 / bne dsp:16 */
static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
{
    rx_bcnd_main(ctx, a->cd, a->dsp);
    return true;
}

/* bra dsp:3 */
/* bra dsp:8 */
/* bra dsp:16 */
/* bra dsp:24 */
static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
{
    rx_bcnd_main(ctx, 14, a->dsp);  /* 14 = always-true condition */
    return true;
}

/* bra rs: pc = insn address + rs (register-relative branch;
 * the decode field is named rd but acts as a source here) */
static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
{
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
/* Push the return address (the insn after the current one) for
 * JSR/BSR. */
static inline void rx_save_pc(DisasContext *ctx)
{
    TCGv pc = tcg_const_i32(ctx->base.pc_next);
    push(pc);
    tcg_temp_free(pc);
}
/* jmp rs: absolute indirect jump */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* jsr rs: push return address, then jump indirect */
static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
{
    rx_save_pc(ctx);
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* bsr dsp:16 */
/* bsr dsp:24 */
static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
{
    rx_save_pc(ctx);
    rx_bcnd_main(ctx, 14, a->dsp);  /* 14 = unconditional */
    return true;
}

/* bsr rs: push return address, then register-relative branch
 * (decode field is named rd but acts as a source, like BRA_l) */
static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
{
    rx_save_pc(ctx);
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* rts: pop the return address into pc */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* nop */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{
    return true;
}
/* scmpu: string compare, implemented entirely in a helper */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
    gen_helper_scmpu(cpu_env);
    return true;
}

/* smovu: string move, implemented entirely in a helper */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
    gen_helper_smovu(cpu_env);
    return true;
}

/* smovf */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
    gen_helper_smovf(cpu_env);
    return true;
}

/* smovb */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
    gen_helper_smovb(cpu_env);
    return true;
}

/* Invoke a string-op helper that takes the element size (a->sz) as
 * its only argument. */
#define STRING(op)                              \
    do {                                        \
        TCGv size = tcg_const_i32(a->sz);       \
        gen_helper_##op(cpu_env, size);         \
        tcg_temp_free(size);                    \
    } while (0)
/* suntil.<bwl> */
static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
{
    STRING(suntil);
    return true;
}

/* swhile.<bwl> */
static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
{
    STRING(swhile);
    return true;
}

/* sstr.<bwl> */
static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
{
    STRING(sstr);
    return true;
}

/* rmpa.<bwl> */
static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
{
    STRING(rmpa);
    return true;
}
/* ret = (upper signed 16 bits of rs) * (upper signed 16 bits of rs2),
 * shifted left 16 so the 32-bit product sits at bits 16..47 of the
 * 64-bit accumulator format. */
static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_sari_i64(tmp0, tmp0, 16);    /* keep bits 16..31, signed */
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_sari_i64(tmp1, tmp1, 16);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);      /* position for the accumulator */
    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
}

/* Same as rx_mul64hi but multiplies the lower signed 16-bit halves. */
static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_ext16s_i64(tmp0, tmp0);      /* keep bits 0..15, signed */
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_ext16s_i64(tmp1, tmp1);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);      /* position for the accumulator */
    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
}
/* mulhi rs,rs2: acc = product of upper 16-bit halves */
static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
{
    rx_mul64hi(cpu_acc, a->rs, a->rs2);
    return true;
}

/* mullo rs,rs2: acc = product of lower 16-bit halves */
static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
{
    rx_mul64lo(cpu_acc, a->rs, a->rs2);
    return true;
}

/* machi rs,rs2: acc += product of upper 16-bit halves */
static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64hi(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    tcg_temp_free_i64(tmp);
    return true;
}

/* maclo rs,rs2: acc += product of lower 16-bit halves */
static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64lo(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    tcg_temp_free_i64(tmp);
    return true;
}
/* mvfachi rd: rd = bits 63..32 of the accumulator */
static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
{
    tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
    return true;
}

/* mvfacmi rd: rd = bits 47..16 (middle word) of the accumulator */
static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
{
    TCGv_i64 rd64;
    rd64 = tcg_temp_new_i64();
    tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
    tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
    tcg_temp_free_i64(rd64);
    return true;
}

/* mvtachi rs: bits 63..32 of the accumulator = rs */
static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
    tcg_temp_free_i64(rs64);
    return true;
}

/* mvtaclo rs: bits 31..0 of the accumulator = rs */
static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
    tcg_temp_free_i64(rs64);
    return true;
}

/* racw #imm: round the accumulator; the encoded imm is biased by one
 * before being handed to the helper */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
    TCGv imm = tcg_const_i32(a->imm + 1);
    gen_helper_racw(cpu_env, imm);
    tcg_temp_free(imm);
    return true;
}
/* sat rd: if the last op overflowed (psw_o bit 31 set), replace rd
 * with INT32_MAX or INT32_MIN according to the sign flag */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
    TCGv tmp, z;
    tmp = tcg_temp_new();
    z = tcg_const_i32(0);
    /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
    tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
    /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
    tcg_gen_xori_i32(tmp, tmp, 0x80000000);
    /* overflow test: psw_o < 0, i.e. its bit 31 is set */
    tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
                        cpu_psw_o, z, tmp, cpu_regs[a->rd]);
    tcg_temp_free(tmp);
    tcg_temp_free(z);
    return true;
}

/* satr: saturate r4:r5:r6 after rmpa — done in a helper */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
    gen_helper_satr(cpu_env);
    return true;
}
#define cat3(a, b, c) a##b##c
/* Emit the two trans functions for a two-operand FP op <name>:
 *   _ir: rd = op(rd, #imm32 literal fetched via li())
 *   _mr: rd = op(rd, rs or dsp[rs])
 * The op itself is a helper (FPSW handling lives there). */
#define FOP(name, op)                                                   \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx,              \
                                        cat3(arg_, name, _ir) * a)      \
    {                                                                   \
        TCGv imm = tcg_const_i32(li(ctx, 0));                           \
        gen_helper_##op(cpu_regs[a->rd], cpu_env,                       \
                        cpu_regs[a->rd], imm);                          \
        tcg_temp_free(imm);                                             \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _mr)(DisasContext *ctx,              \
                                        cat3(arg_, name, _mr) * a)      \
    {                                                                   \
        TCGv val, mem;                                                  \
        mem = tcg_temp_new();                                           \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);            \
        gen_helper_##op(cpu_regs[a->rd], cpu_env,                       \
                        cpu_regs[a->rd], val);                          \
        tcg_temp_free(mem);                                             \
        return true;                                                    \
    }

/* Emit the trans function for a one-operand FP conversion:
 * rd = op(rs or dsp[rs]) */
#define FCONVOP(name, op)                                       \
    static bool trans_##name(DisasContext *ctx, arg_##name * a) \
    {                                                           \
        TCGv val, mem;                                          \
        mem = tcg_temp_new();                                   \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);    \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, val);         \
        tcg_temp_free(mem);                                     \
        return true;                                            \
    }
FOP(FADD, fadd)
FOP(FSUB, fsub)
FOP(FMUL, fmul)
FOP(FDIV, fdiv)

/* fcmp #imm, rd: compare only — result goes to flags via the helper */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
    TCGv imm = tcg_const_i32(li(ctx, 0));
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}

/* fcmp dsp[rs], rd */
/* fcmp rs, rd */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}

FCONVOP(FTOI, ftoi)
FCONVOP(ROUND, round)

/* itof rs, rd */
/* itof dsp[rs], rd */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    /* unlike FCONVOP, the memory operand size comes from a->mi */
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    gen_helper_itof(cpu_regs[a->rd], cpu_env, val);
    tcg_temp_free(mem);
    return true;
}
/* Byte read-modify-write: set the mask bits at address mem. */
static void rx_bsetm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_or_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

/* Byte read-modify-write: clear the mask bits at address mem. */
static void rx_bclrm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_andc_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

/* Byte test: C = masked bit set; lazy Z mirrors the same value
 * (Z reads as set when psw_z == 0, i.e. when the bit is clear). */
static void rx_btstm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_and_i32(val, val, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
    tcg_temp_free(val);
}

/* Byte read-modify-write: invert the mask bits at address mem. */
static void rx_bnotm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_xor_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

/* Register variants of the same four bit operations. */
static void rx_bsetr(TCGv reg, TCGv mask)
{
    tcg_gen_or_i32(reg, reg, mask);
}

static void rx_bclrr(TCGv reg, TCGv mask)
{
    tcg_gen_andc_i32(reg, reg, mask);
}

static inline void rx_btstr(TCGv reg, TCGv mask)
{
    TCGv t0;
    t0 = tcg_temp_new();
    tcg_gen_and_i32(t0, reg, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
    tcg_temp_free(t0);
}

static inline void rx_bnotr(TCGv reg, TCGv mask)
{
    tcg_gen_xor_i32(reg, reg, mask);
}
/* Emit the four addressing-mode variants of a bit instruction:
 *   _im: immediate bit number, memory operand (bit index masked to 3
 *        bits by the 1 << imm encoding of the decode field)
 *   _ir: immediate bit number, register operand
 *   _rr: bit number in rs (mod 32), register operand
 *   _rm: bit number in rd (mod 8), memory operand */
#define BITOP(name, op)                                         \
    static bool cat3(trans_, name, _im)(DisasContext *ctx,      \
                                        cat3(arg_, name, _im) * a) \
    {                                                           \
        TCGv mask, mem, addr;                                   \
        mem = tcg_temp_new();                                   \
        mask = tcg_const_i32(1 << a->imm);                      \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);     \
        cat3(rx_, op, m)(addr, mask);                           \
        tcg_temp_free(mask);                                    \
        tcg_temp_free(mem);                                     \
        return true;                                            \
    }                                                           \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx,      \
                                        cat3(arg_, name, _ir) * a) \
    {                                                           \
        TCGv mask;                                              \
        mask = tcg_const_i32(1 << a->imm);                      \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);                \
        tcg_temp_free(mask);                                    \
        return true;                                            \
    }                                                           \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx,      \
                                        cat3(arg_, name, _rr) * a) \
    {                                                           \
        TCGv mask, b;                                           \
        mask = tcg_const_i32(1);                                \
        b = tcg_temp_new();                                     \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31);               \
        tcg_gen_shl_i32(mask, mask, b);                         \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);                \
        tcg_temp_free(mask);                                    \
        tcg_temp_free(b);                                       \
        return true;                                            \
    }                                                           \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx,      \
                                        cat3(arg_, name, _rm) * a) \
    {                                                           \
        TCGv mask, mem, addr, b;                                \
        mask = tcg_const_i32(1);                                \
        b = tcg_temp_new();                                     \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7);                \
        tcg_gen_shl_i32(mask, mask, b);                         \
        mem = tcg_temp_new();                                   \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);     \
        cat3(rx_, op, m)(addr, mask);                           \
        tcg_temp_free(mem);                                     \
        tcg_temp_free(mask);                                    \
        tcg_temp_free(b);                                       \
        return true;                                            \
    }

BITOP(BSET, bset)
BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)
/* Store the truth value of condition 'cond' into bit 'pos' of val:
 * clear the bit, evaluate the condition from the lazy flags, then
 * deposit the 0/1 result. */
static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
{
    TCGv bit;
    DisasCompare dc;
    dc.temp = tcg_temp_new();
    bit = tcg_temp_new();
    psw_cond(&dc, cond);
    tcg_gen_andi_i32(val, val, ~(1 << pos));
    tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
    tcg_gen_deposit_i32(val, val, bit, pos, 1);
    tcg_temp_free(bit);
    tcg_temp_free(dc.temp);
}

/* bmcnd #imm, dsp[rd]: byte read-modify-write at dsp[rd] */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
    TCGv val, mem, addr;
    val = tcg_temp_new();
    mem = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
    rx_gen_ld(MO_8, val, addr);
    bmcnd_op(val, a->cd, a->imm);
    rx_gen_st(MO_8, val, addr);
    tcg_temp_free(val);
    tcg_temp_free(mem);
    return true;
}

/* bmcond #imm, rd */
static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
{
    bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
    return true;
}
/* PSW bit numbers accepted by clrpsw/setpsw (cb field) */
enum {
    PSW_C = 0,
    PSW_Z = 1,
    PSW_S = 2,
    PSW_O = 3,
    PSW_I = 8,  /* privileged */
    PSW_U = 9,  /* privileged */
};
2136 static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
2138 if (cb < 8) {
2139 switch (cb) {
2140 case PSW_C:
2141 tcg_gen_movi_i32(cpu_psw_c, val);
2142 break;
2143 case PSW_Z:
2144 tcg_gen_movi_i32(cpu_psw_z, val == 0);
2145 break;
2146 case PSW_S:
2147 tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0);
2148 break;
2149 case PSW_O:
2150 tcg_gen_movi_i32(cpu_psw_o, val << 31);
2151 break;
2152 default:
2153 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
2154 break;
2156 } else if (is_privileged(ctx, 0)) {
2157 switch (cb) {
2158 case PSW_I:
2159 tcg_gen_movi_i32(cpu_psw_i, val);
2160 ctx->base.is_jmp = DISAS_UPDATE;
2161 break;
2162 case PSW_U:
2163 tcg_gen_movi_i32(cpu_psw_u, val);
2164 break;
2165 default:
2166 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
2167 break;
/* clrpsw psw */
static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
{
    clrsetpsw(ctx, a->cb, 0);
    return true;
}

/* setpsw psw */
static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
{
    clrsetpsw(ctx, a->cb, 1);
    return true;
}
/* mvtipl #imm: set the interrupt priority level (privileged) */
static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_psw_ipl, a->imm);
        /* IPL changed: end the TB so pending interrupts are re-checked */
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}

/* mvtc #imm, rd: move immediate to control register */
static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
{
    TCGv imm;

    imm = tcg_const_i32(a->imm);
    move_to_cr(ctx, imm, a->cr);
    /* cr 0 is the PSW: a privileged write must end the TB */
    if (a->cr == 0 && is_privileged(ctx, 0)) {
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    tcg_temp_free(imm);
    return true;
}

/* mvtc rs, rd: move register to control register */
static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
{
    move_to_cr(ctx, cpu_regs[a->rs], a->cr);
    /* cr 0 is the PSW: a privileged write must end the TB */
    if (a->cr == 0 && is_privileged(ctx, 0)) {
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}

/* mvfc rs, rd: move control register to rd */
static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
{
    move_from_cr(cpu_regs[a->rd], a->cr, ctx->pc);
    return true;
}
/* rtfi: return from fast interrupt — restore pc/psw from the backup
 * registers (privileged) */
static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        tcg_gen_mov_i32(cpu_pc, cpu_bpc);
        tcg_gen_mov_i32(psw, cpu_bpsw);
        gen_helper_set_psw_rte(cpu_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
        tcg_temp_free(psw);
    }
    return true;
}

/* rte: return from exception — pop pc then psw from the stack
 * (privileged) */
static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        pop(cpu_pc);
        pop(psw);
        gen_helper_set_psw_rte(cpu_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
        tcg_temp_free(psw);
    }
    return true;
}
/* brk: software break — sync pc to the next insn, then raise via
 * helper (does not return to this TB) */
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxbrk(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* int #imm: software interrupt, vector 0..255 */
static bool trans_INT(DisasContext *ctx, arg_INT *a)
{
    TCGv vec;

    tcg_debug_assert(a->imm < 0x100);
    vec = tcg_const_i32(a->imm);
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxint(cpu_env, vec);
    tcg_temp_free(vec);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
2280 /* wait */
2281 static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
2283 if (is_privileged(ctx, 1)) {
2284 tcg_gen_addi_i32(cpu_pc, cpu_pc, 2);
2285 gen_helper_wait(cpu_env);
2287 return true;
/* Translator hook: stash the CPU state pointer for decode-time use. */
static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    CPURXState *env = cs->env_ptr;
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    ctx->env = env;
}

/* Translator hook: nothing to do at TB start. */
static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}

/* Translator hook: record the insn address for restore_state_to_opc. */
static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

/* Translator hook: decode one insn; undecodable bytes raise the
 * illegal-instruction exception at runtime. */
static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn;

    ctx->pc = ctx->base.pc_next;   /* address of the insn being decoded */
    insn = decode_load(ctx);
    if (!decode(ctx, insn)) {
        gen_helper_raise_illegal_instruction(cpu_env);
    }
}
/* Translator hook: emit the TB epilogue according to how translation
 * ended (see the DISAS_* values defined at the top of this file). */
static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        /* fall through to the next insn via a direct TB link */
        gen_goto_tb(ctx, 0, dcbase->pc_next);
        break;
    case DISAS_JUMP:
        /* cpu_pc already set by the insn */
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_UPDATE:
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        /* full exit so interrupt/PSW changes are observed */
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}
/* Translator hook: dump the guest insns of this TB to the log. */
static void rx_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */
    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
}
/* Hook table consumed by the generic translator_loop. */
static const TranslatorOps rx_tr_ops = {
    .init_disas_context = rx_tr_init_disas_context,
    .tb_start           = rx_tr_tb_start,
    .insn_start         = rx_tr_insn_start,
    .translate_insn     = rx_tr_translate_insn,
    .tb_stop            = rx_tr_tb_stop,
    .disas_log          = rx_tr_disas_log,
};
/* Target entry point: translate one TB using the hooks above. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&rx_tr_ops, &dc.base, cs, tb, max_insns);
}

/* Restore env->pc from the values recorded by tcg_gen_insn_start. */
void restore_state_to_opc(CPURXState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}

/* Allocate one TCG global backed by CPURXState field 'sym'. */
#define ALLOC_REGISTER(sym, name) \
    cpu_##sym = tcg_global_mem_new_i32(cpu_env, \
                                       offsetof(CPURXState, sym), name)
2377 void rx_translate_init(void)
2379 static const char * const regnames[NUM_REGS] = {
2380 "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
2381 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
2383 int i;
2385 for (i = 0; i < NUM_REGS; i++) {
2386 cpu_regs[i] = tcg_global_mem_new_i32(cpu_env,
2387 offsetof(CPURXState, regs[i]),
2388 regnames[i]);
2390 ALLOC_REGISTER(pc, "PC");
2391 ALLOC_REGISTER(psw_o, "PSW(O)");
2392 ALLOC_REGISTER(psw_s, "PSW(S)");
2393 ALLOC_REGISTER(psw_z, "PSW(Z)");
2394 ALLOC_REGISTER(psw_c, "PSW(C)");
2395 ALLOC_REGISTER(psw_u, "PSW(U)");
2396 ALLOC_REGISTER(psw_i, "PSW(I)");
2397 ALLOC_REGISTER(psw_pm, "PSW(PM)");
2398 ALLOC_REGISTER(psw_ipl, "PSW(IPL)");
2399 ALLOC_REGISTER(usp, "USP");
2400 ALLOC_REGISTER(fpsw, "FPSW");
2401 ALLOC_REGISTER(bpsw, "BPSW");
2402 ALLOC_REGISTER(bpc, "BPC");
2403 ALLOC_REGISTER(isp, "ISP");
2404 ALLOC_REGISTER(fintv, "FINTV");
2405 ALLOC_REGISTER(intb, "INTB");
2406 cpu_acc = tcg_global_mem_new_i64(cpu_env,
2407 offsetof(CPURXState, acc), "ACC");