SH4: Convert memory loads/stores to TCG
[qemu/mini2440.git] / target-sh4 / translate.c
blob8e01fad93e5ca53b9f85efeb369a4e4f0b93b766
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <assert.h>
27 #define DEBUG_DISAS
28 #define SH4_DEBUG_DISAS
29 //#define SH4_SINGLE_STEP
31 #include "cpu.h"
32 #include "exec-all.h"
33 #include "disas.h"
34 #include "helper.h"
35 #include "tcg-op.h"
36 #include "qemu-common.h"
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    struct TranslationBlock *tb;  /* TB currently being translated */
    target_ulong pc;              /* guest PC of the instruction being decoded */
    uint32_t sr;                  /* SR snapshot (selects register bank via MD/RB) */
    uint32_t fpscr;               /* FPSCR snapshot (FR/SZ/PR affect decoding) */
    uint16_t opcode;              /* 16-bit instruction word */
    uint32_t flags;               /* translation flags (DELAY_SLOT, ...) */
    int bstate;                   /* one of the BS_* termination states */
    int memidx;                   /* MMU index for qemu_ld/st (kernel vs user) */
    uint32_t delayed_pc;          /* static branch target, or (uint32_t)-1 if dynamic */
    int singlestep_enabled;       /* nonzero when debugger single-step is active */
} DisasContext;
/* Possible termination states of a translation block. */
enum {
    BS_NONE   = 0, /* fell through without branch or exception */
    BS_STOP   = 1, /* we want to stop translation for any reason */
    BS_BRANCH = 2, /* we reached a branch condition */
    BS_EXCP   = 3, /* we reached an exception condition */
};
60 /* global register indexes */
61 static TCGv cpu_env;
62 static TCGv cpu_gregs[24];
63 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
64 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
65 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_flags;
67 /* internal register indexes */
68 static TCGv cpu_flags, cpu_delayed_pc;
70 /* dyngen register indexes */
71 static TCGv cpu_T[2];
73 #include "gen-icount.h"
75 static void sh4_translate_init(void)
77 int i;
78 static int done_init = 0;
79 static const char * const gregnames[24] = {
80 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
81 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
82 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
83 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
84 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
87 if (done_init)
88 return;
90 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
91 cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
92 cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
94 for (i = 0; i < 24; i++)
95 cpu_gregs[i] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
96 offsetof(CPUState, gregs[i]),
97 gregnames[i]);
99 cpu_pc = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
100 offsetof(CPUState, pc), "PC");
101 cpu_sr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
102 offsetof(CPUState, sr), "SR");
103 cpu_ssr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
104 offsetof(CPUState, ssr), "SSR");
105 cpu_spc = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
106 offsetof(CPUState, spc), "SPC");
107 cpu_gbr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
108 offsetof(CPUState, gbr), "GBR");
109 cpu_vbr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
110 offsetof(CPUState, vbr), "VBR");
111 cpu_sgr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
112 offsetof(CPUState, sgr), "SGR");
113 cpu_dbr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
114 offsetof(CPUState, dbr), "DBR");
115 cpu_mach = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
116 offsetof(CPUState, mach), "MACH");
117 cpu_macl = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
118 offsetof(CPUState, macl), "MACL");
119 cpu_pr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
120 offsetof(CPUState, pr), "PR");
121 cpu_fpscr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
122 offsetof(CPUState, fpscr), "FPSCR");
123 cpu_fpul = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
124 offsetof(CPUState, fpul), "FPUL");
126 cpu_flags = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
127 offsetof(CPUState, flags), "_flags_");
128 cpu_delayed_pc = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
129 offsetof(CPUState, delayed_pc),
130 "_delayed_pc_");
132 /* register helpers */
133 #undef DEF_HELPER
134 #define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name);
135 #include "helper.h"
137 done_init = 1;
140 #ifdef CONFIG_USER_ONLY
142 #define GEN_OP_LD(width, reg) \
143 void gen_op_ld##width##_T0_##reg (DisasContext *ctx) { \
144 gen_op_ld##width##_T0_##reg##_raw(); \
146 #define GEN_OP_ST(width, reg) \
147 void gen_op_st##width##_##reg##_T1 (DisasContext *ctx) { \
148 gen_op_st##width##_##reg##_T1_raw(); \
151 #else
153 #define GEN_OP_LD(width, reg) \
154 void gen_op_ld##width##_T0_##reg (DisasContext *ctx) { \
155 if (ctx->memidx) gen_op_ld##width##_T0_##reg##_kernel(); \
156 else gen_op_ld##width##_T0_##reg##_user();\
158 #define GEN_OP_ST(width, reg) \
159 void gen_op_st##width##_##reg##_T1 (DisasContext *ctx) { \
160 if (ctx->memidx) gen_op_st##width##_##reg##_T1_kernel(); \
161 else gen_op_st##width##_##reg##_T1_user();\
164 #endif
166 GEN_OP_LD(fl, FT0)
167 GEN_OP_ST(fl, FT0)
168 GEN_OP_LD(fq, DT0)
169 GEN_OP_ST(fq, DT0)
171 void cpu_dump_state(CPUState * env, FILE * f,
172 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
173 int flags)
175 int i;
176 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
177 env->pc, env->sr, env->pr, env->fpscr);
178 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
179 env->spc, env->ssr, env->gbr, env->vbr);
180 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
181 env->sgr, env->dbr, env->delayed_pc, env->fpul);
182 for (i = 0; i < 24; i += 4) {
183 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
184 i, env->gregs[i], i + 1, env->gregs[i + 1],
185 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
187 if (env->flags & DELAY_SLOT) {
188 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
189 env->delayed_pc);
190 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
191 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
192 env->delayed_pc);
196 void cpu_sh4_reset(CPUSH4State * env)
198 #if defined(CONFIG_USER_ONLY)
199 env->sr = SR_FD; /* FD - kernel does lazy fpu context switch */
200 #else
201 env->sr = 0x700000F0; /* MD, RB, BL, I3-I0 */
202 #endif
203 env->vbr = 0;
204 env->pc = 0xA0000000;
205 #if defined(CONFIG_USER_ONLY)
206 env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
207 set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
208 #else
209 env->fpscr = 0x00040001; /* CPU reset value according to SH4 manual */
210 set_float_rounding_mode(float_round_to_zero, &env->fp_status);
211 #endif
212 env->mmucr = 0;
215 CPUSH4State *cpu_sh4_init(const char *cpu_model)
217 CPUSH4State *env;
219 env = qemu_mallocz(sizeof(CPUSH4State));
220 if (!env)
221 return NULL;
222 cpu_exec_init(env);
223 sh4_translate_init();
224 cpu_sh4_reset(env);
225 tlb_flush(env, 1);
226 return env;
229 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
231 TranslationBlock *tb;
232 tb = ctx->tb;
234 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
235 !ctx->singlestep_enabled) {
236 /* Use a direct jump if in same page and singlestep not enabled */
237 tcg_gen_goto_tb(n);
238 tcg_gen_movi_i32(cpu_pc, dest);
239 tcg_gen_exit_tb((long) tb + n);
240 } else {
241 tcg_gen_movi_i32(cpu_pc, dest);
242 if (ctx->singlestep_enabled)
243 tcg_gen_helper_0_0(helper_debug);
244 tcg_gen_exit_tb(0);
248 static void gen_jump(DisasContext * ctx)
250 if (ctx->delayed_pc == (uint32_t) - 1) {
251 /* Target is not statically known, it comes necessarily from a
252 delayed jump as immediate jump are conditinal jumps */
253 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
254 if (ctx->singlestep_enabled)
255 tcg_gen_helper_0_0(helper_debug);
256 tcg_gen_exit_tb(0);
257 } else {
258 gen_goto_tb(ctx, 0, ctx->delayed_pc);
262 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
264 int label = gen_new_label();
265 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
266 tcg_gen_andi_i32(cpu_T[0], cpu_sr, SR_T);
267 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], t ? SR_T : 0, label);
268 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
269 gen_set_label(label);
272 /* Immediate conditional jump (bt or bf) */
273 static void gen_conditional_jump(DisasContext * ctx,
274 target_ulong ift, target_ulong ifnott)
276 int l1;
278 l1 = gen_new_label();
279 tcg_gen_andi_i32(cpu_T[0], cpu_sr, SR_T);
280 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_T[0], SR_T, l1);
281 gen_goto_tb(ctx, 0, ifnott);
282 gen_set_label(l1);
283 gen_goto_tb(ctx, 1, ift);
286 /* Delayed conditional jump (bt or bf) */
287 static void gen_delayed_conditional_jump(DisasContext * ctx)
289 int l1;
291 l1 = gen_new_label();
292 tcg_gen_andi_i32(cpu_T[0], cpu_flags, DELAY_SLOT_TRUE);
293 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_T[0], DELAY_SLOT_TRUE, l1);
294 gen_goto_tb(ctx, 1, ctx->pc + 2);
295 gen_set_label(l1);
296 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
297 gen_jump(ctx);
300 static inline void gen_set_t(void)
302 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
305 static inline void gen_clr_t(void)
307 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
310 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
312 int label1 = gen_new_label();
313 int label2 = gen_new_label();
314 tcg_gen_brcond_i32(cond, t1, t0, label1);
315 gen_clr_t();
316 tcg_gen_br(label2);
317 gen_set_label(label1);
318 gen_set_t();
319 gen_set_label(label2);
322 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
324 int label1 = gen_new_label();
325 int label2 = gen_new_label();
326 tcg_gen_brcondi_i32(cond, t0, imm, label1);
327 gen_clr_t();
328 tcg_gen_br(label2);
329 gen_set_label(label1);
330 gen_set_t();
331 gen_set_label(label2);
334 static inline void gen_store_flags(uint32_t flags)
336 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
337 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
/* Instruction field extraction (ctx->opcode is the 16-bit opcode). */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
/* Sign-extended 8-bit immediate. */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* Sign-extended 12-bit branch displacement. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* Map register number x to its gregs[] slot: R0-R7 use the second bank
   (index +16) when both SR.MD and SR.RB are set. */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
  (x) + 16 : (x))

/* Same mapping, but selecting the currently inactive bank. */
#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
  ? (x) + 16 : (x))

/* FP register mapping: FPSCR.FR swaps the two FP register banks. */
#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
/* Fold the XD register pair encoding into an even index. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */

/* Raise a slot-illegal exception if the current instruction is being
   decoded inside a delay slot (it may not be a branch itself). */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
  {tcg_gen_helper_0_0(helper_raise_slot_illegal_instruction); ctx->bstate = BS_EXCP; \
  return;}
366 void _decode_opc(DisasContext * ctx)
368 #if 0
369 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
370 #endif
371 switch (ctx->opcode) {
372 case 0x0019: /* div0u */
373 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
374 return;
375 case 0x000b: /* rts */
376 CHECK_NOT_DELAY_SLOT
377 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
378 ctx->flags |= DELAY_SLOT;
379 ctx->delayed_pc = (uint32_t) - 1;
380 return;
381 case 0x0028: /* clrmac */
382 tcg_gen_movi_i32(cpu_mach, 0);
383 tcg_gen_movi_i32(cpu_macl, 0);
384 return;
385 case 0x0048: /* clrs */
386 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
387 return;
388 case 0x0008: /* clrt */
389 gen_clr_t();
390 return;
391 case 0x0038: /* ldtlb */
392 #if defined(CONFIG_USER_ONLY)
393 assert(0); /* XXXXX */
394 #else
395 tcg_gen_helper_0_0(helper_ldtlb);
396 #endif
397 return;
398 case 0x002b: /* rte */
399 CHECK_NOT_DELAY_SLOT
400 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
401 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
402 ctx->flags |= DELAY_SLOT;
403 ctx->delayed_pc = (uint32_t) - 1;
404 return;
405 case 0x0058: /* sets */
406 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
407 return;
408 case 0x0018: /* sett */
409 gen_set_t();
410 return;
411 case 0xfbfd: /* frchg */
412 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
413 ctx->bstate = BS_STOP;
414 return;
415 case 0xf3fd: /* fschg */
416 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
417 ctx->bstate = BS_STOP;
418 return;
419 case 0x0009: /* nop */
420 return;
421 case 0x001b: /* sleep */
422 if (ctx->memidx) {
423 tcg_gen_helper_0_0(helper_sleep);
424 } else {
425 tcg_gen_helper_0_0(helper_raise_illegal_instruction);
426 ctx->bstate = BS_EXCP;
428 return;
431 switch (ctx->opcode & 0xf000) {
432 case 0x1000: /* mov.l Rm,@(disp,Rn) */
433 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
434 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
435 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], B3_0 * 4);
436 tcg_gen_qemu_st32(cpu_T[0], cpu_T[1], ctx->memidx);
437 return;
438 case 0x5000: /* mov.l @(disp,Rm),Rn */
439 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
440 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B3_0 * 4);
441 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
442 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
443 return;
444 case 0xe000: /* mov #imm,Rn */
445 tcg_gen_movi_i32(cpu_gregs[REG(B11_8)], B7_0s);
446 return;
447 case 0x9000: /* mov.w @(disp,PC),Rn */
448 tcg_gen_movi_i32(cpu_T[0], ctx->pc + 4 + B7_0 * 2);
449 tcg_gen_qemu_ld16s(cpu_T[0], cpu_T[0], ctx->memidx);
450 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
451 return;
452 case 0xd000: /* mov.l @(disp,PC),Rn */
453 tcg_gen_movi_i32(cpu_T[0], (ctx->pc + 4 + B7_0 * 4) & ~3);
454 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
455 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
456 return;
457 case 0x7000: /* add #imm,Rn */
458 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], B7_0s);
459 return;
460 case 0xa000: /* bra disp */
461 CHECK_NOT_DELAY_SLOT
462 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
463 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
464 ctx->flags |= DELAY_SLOT;
465 return;
466 case 0xb000: /* bsr disp */
467 CHECK_NOT_DELAY_SLOT
468 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
469 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
470 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
471 ctx->flags |= DELAY_SLOT;
472 return;
475 switch (ctx->opcode & 0xf00f) {
476 case 0x6003: /* mov Rm,Rn */
477 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
478 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
479 return;
480 case 0x2000: /* mov.b Rm,@Rn */
481 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
482 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
483 tcg_gen_qemu_st8(cpu_T[0], cpu_T[1], ctx->memidx);
484 return;
485 case 0x2001: /* mov.w Rm,@Rn */
486 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
487 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
488 tcg_gen_qemu_st16(cpu_T[0], cpu_T[1], ctx->memidx);
489 return;
490 case 0x2002: /* mov.l Rm,@Rn */
491 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
492 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
493 tcg_gen_qemu_st32(cpu_T[0], cpu_T[1], ctx->memidx);
494 return;
495 case 0x6000: /* mov.b @Rm,Rn */
496 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
497 tcg_gen_qemu_ld8s(cpu_T[0], cpu_T[0], ctx->memidx);
498 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
499 return;
500 case 0x6001: /* mov.w @Rm,Rn */
501 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
502 tcg_gen_qemu_ld16s(cpu_T[0], cpu_T[0], ctx->memidx);
503 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
504 return;
505 case 0x6002: /* mov.l @Rm,Rn */
506 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
507 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
508 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
509 return;
510 case 0x2004: /* mov.b Rm,@-Rn */
511 tcg_gen_subi_i32(cpu_T[1], cpu_gregs[REG(B11_8)], 1);
512 tcg_gen_qemu_st8(cpu_gregs[REG(B7_4)], cpu_T[1], ctx->memidx); /* might cause re-execution */
513 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
514 cpu_gregs[REG(B11_8)], 1); /* modify register status */
515 return;
516 case 0x2005: /* mov.w Rm,@-Rn */
517 tcg_gen_subi_i32(cpu_T[1], cpu_gregs[REG(B11_8)], 2);
518 tcg_gen_qemu_st16(cpu_gregs[REG(B7_4)], cpu_T[1], ctx->memidx);
519 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
520 cpu_gregs[REG(B11_8)], 2);
521 return;
522 case 0x2006: /* mov.l Rm,@-Rn */
523 tcg_gen_subi_i32(cpu_T[1], cpu_gregs[REG(B11_8)], 4);
524 tcg_gen_qemu_st32(cpu_gregs[REG(B7_4)], cpu_T[1], ctx->memidx);
525 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
526 cpu_gregs[REG(B11_8)], 4);
527 return;
528 case 0x6004: /* mov.b @Rm+,Rn */
529 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
530 tcg_gen_qemu_ld8s(cpu_T[0], cpu_T[0], ctx->memidx);
531 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
532 if ( B11_8 != B7_4 )
533 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
534 cpu_gregs[REG(B7_4)], 1);
535 return;
536 case 0x6005: /* mov.w @Rm+,Rn */
537 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
538 tcg_gen_qemu_ld16s(cpu_T[0], cpu_T[0], ctx->memidx);
539 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
540 if ( B11_8 != B7_4 )
541 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
542 cpu_gregs[REG(B7_4)], 2);
543 return;
544 case 0x6006: /* mov.l @Rm+,Rn */
545 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
546 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
547 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
548 if ( B11_8 != B7_4 )
549 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
550 cpu_gregs[REG(B7_4)], 4);
551 return;
552 case 0x0004: /* mov.b Rm,@(R0,Rn) */
553 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
554 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
555 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
556 tcg_gen_qemu_st8(cpu_T[0], cpu_T[1], ctx->memidx);
557 return;
558 case 0x0005: /* mov.w Rm,@(R0,Rn) */
559 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
560 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
561 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
562 tcg_gen_qemu_st16(cpu_T[0], cpu_T[1], ctx->memidx);
563 return;
564 case 0x0006: /* mov.l Rm,@(R0,Rn) */
565 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
566 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
567 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
568 tcg_gen_qemu_st32(cpu_T[0], cpu_T[1], ctx->memidx);
569 return;
570 case 0x000c: /* mov.b @(R0,Rm),Rn */
571 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
572 tcg_gen_qemu_ld8s(cpu_T[0], cpu_T[0], ctx->memidx);
573 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
574 return;
575 case 0x000d: /* mov.w @(R0,Rm),Rn */
576 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
577 tcg_gen_qemu_ld16s(cpu_T[0], cpu_T[0], ctx->memidx);
578 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
579 return;
580 case 0x000e: /* mov.l @(R0,Rm),Rn */
581 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
582 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
583 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
584 return;
585 case 0x6008: /* swap.b Rm,Rn */
586 tcg_gen_andi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)], 0xffff0000);
587 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 0xff);
588 tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 8);
589 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_T[0]);
590 tcg_gen_shri_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 8);
591 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xff);
592 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_T[0]);
593 return;
594 case 0x6009: /* swap.w Rm,Rn */
595 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 0xffff);
596 tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 16);
597 tcg_gen_shri_i32(cpu_T[1], cpu_gregs[REG(B7_4)], 16);
598 tcg_gen_andi_i32(cpu_T[1], cpu_T[1], 0xffff);
599 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_T[0], cpu_T[1]);
600 return;
601 case 0x200d: /* xtrct Rm,Rn */
602 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 0xffff);
603 tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 16);
604 tcg_gen_shri_i32(cpu_T[1], cpu_gregs[REG(B11_8)], 16);
605 tcg_gen_andi_i32(cpu_T[1], cpu_T[1], 0xffff);
606 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_T[0], cpu_T[1]);
607 return;
608 case 0x300c: /* add Rm,Rn */
609 tcg_gen_add_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
610 return;
611 case 0x300e: /* addc Rm,Rn */
612 tcg_gen_helper_1_2(helper_addc, cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)], cpu_gregs[REG(B11_8)]);
613 return;
614 case 0x300f: /* addv Rm,Rn */
615 tcg_gen_helper_1_2(helper_addv, cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)], cpu_gregs[REG(B11_8)]);
616 return;
617 case 0x2009: /* and Rm,Rn */
618 tcg_gen_and_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
619 return;
620 case 0x3000: /* cmp/eq Rm,Rn */
621 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
622 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
623 gen_cmp(TCG_COND_EQ, cpu_T[0], cpu_T[1]);
624 return;
625 case 0x3003: /* cmp/ge Rm,Rn */
626 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
627 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
628 gen_cmp(TCG_COND_GE, cpu_T[0], cpu_T[1]);
629 return;
630 case 0x3007: /* cmp/gt Rm,Rn */
631 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
632 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
633 gen_cmp(TCG_COND_GT, cpu_T[0], cpu_T[1]);
634 return;
635 case 0x3006: /* cmp/hi Rm,Rn */
636 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
637 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
638 gen_cmp(TCG_COND_GTU, cpu_T[0], cpu_T[1]);
639 return;
640 case 0x3002: /* cmp/hs Rm,Rn */
641 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
642 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
643 gen_cmp(TCG_COND_GEU, cpu_T[0], cpu_T[1]);
644 return;
645 case 0x200c: /* cmp/str Rm,Rn */
646 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
647 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
648 gen_op_cmp_str_T0_T1();
649 return;
650 case 0x2007: /* div0s Rm,Rn */
651 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
652 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
653 gen_op_div0s_T0_T1();
654 return;
655 case 0x3004: /* div1 Rm,Rn */
656 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
657 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
658 gen_op_div1_T0_T1();
659 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
660 return;
661 case 0x300d: /* dmuls.l Rm,Rn */
663 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
664 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
666 tcg_gen_ext_i32_i64(tmp1, cpu_gregs[REG(B7_4)]);
667 tcg_gen_ext_i32_i64(tmp2, cpu_gregs[REG(B11_8)]);
668 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
669 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
670 tcg_gen_shri_i64(tmp1, tmp1, 32);
671 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
673 tcg_temp_free(tmp1);
674 tcg_temp_free(tmp2);
676 return;
677 case 0x3005: /* dmulu.l Rm,Rn */
679 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
680 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
682 tcg_gen_extu_i32_i64(tmp1, cpu_gregs[REG(B7_4)]);
683 tcg_gen_extu_i32_i64(tmp2, cpu_gregs[REG(B11_8)]);
684 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
685 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
686 tcg_gen_shri_i64(tmp1, tmp1, 32);
687 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
689 tcg_temp_free(tmp1);
690 tcg_temp_free(tmp2);
692 return;
693 case 0x600e: /* exts.b Rm,Rn */
694 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
695 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xff);
696 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
697 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
698 return;
699 case 0x600f: /* exts.w Rm,Rn */
700 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
701 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
702 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
703 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
704 return;
705 case 0x600c: /* extu.b Rm,Rn */
706 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
707 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xff);
708 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
709 return;
710 case 0x600d: /* extu.w Rm,Rn */
711 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
712 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
713 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
714 return;
715 case 0x000f: /* mac.l @Rm+,@Rn+ */
716 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
717 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
718 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
719 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
720 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
721 tcg_gen_helper_0_2(helper_macl, cpu_T[0], cpu_T[1]);
722 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)], cpu_gregs[REG(B7_4)], 4);
723 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
724 return;
725 case 0x400f: /* mac.w @Rm+,@Rn+ */
726 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
727 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
728 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
729 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
730 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
731 tcg_gen_helper_0_2(helper_macw, cpu_T[0], cpu_T[1]);
732 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 2);
733 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)], cpu_gregs[REG(B7_4)], 2);
734 return;
735 case 0x0007: /* mul.l Rm,Rn */
736 tcg_gen_mul_i32(cpu_macl, cpu_gregs[REG(B7_4)], cpu_gregs[REG(B11_8)]);
737 return;
738 case 0x200f: /* muls.w Rm,Rn */
739 tcg_gen_ext16s_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
740 tcg_gen_ext16s_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
741 tcg_gen_mul_i32(cpu_macl, cpu_T[0], cpu_T[1]);
742 return;
743 case 0x200e: /* mulu.w Rm,Rn */
744 tcg_gen_ext16u_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
745 tcg_gen_ext16u_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
746 tcg_gen_mul_i32(cpu_macl, cpu_T[0], cpu_T[1]);
747 return;
748 case 0x600b: /* neg Rm,Rn */
749 tcg_gen_neg_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
750 return;
751 case 0x600a: /* negc Rm,Rn */
752 tcg_gen_helper_1_1(helper_negc, cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
753 return;
754 case 0x6007: /* not Rm,Rn */
755 tcg_gen_not_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
756 return;
757 case 0x200b: /* or Rm,Rn */
758 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
759 return;
760 case 0x400c: /* shad Rm,Rn */
761 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
762 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
763 gen_op_shad_T0_T1();
764 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
765 return;
766 case 0x400d: /* shld Rm,Rn */
767 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
768 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
769 gen_op_shld_T0_T1();
770 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
771 return;
772 case 0x3008: /* sub Rm,Rn */
773 tcg_gen_sub_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
774 return;
775 case 0x300a: /* subc Rm,Rn */
776 tcg_gen_helper_1_2(helper_subc, cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)], cpu_gregs[REG(B11_8)]);
777 return;
778 case 0x300b: /* subv Rm,Rn */
779 tcg_gen_helper_1_2(helper_subv, cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)], cpu_gregs[REG(B11_8)]);
780 return;
781 case 0x2008: /* tst Rm,Rn */
782 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
783 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
784 tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1]);
785 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
786 return;
787 case 0x200a: /* xor Rm,Rn */
788 tcg_gen_xor_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
789 return;
790 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
791 if (ctx->fpscr & FPSCR_SZ) {
792 gen_op_fmov_drN_DT0(XREG(B7_4));
793 gen_op_fmov_DT0_drN(XREG(B11_8));
794 } else {
795 gen_op_fmov_frN_FT0(FREG(B7_4));
796 gen_op_fmov_FT0_frN(FREG(B11_8));
798 return;
799 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
800 if (ctx->fpscr & FPSCR_SZ) {
801 gen_op_fmov_drN_DT0(XREG(B7_4));
802 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
803 gen_op_stfq_DT0_T1(ctx);
804 } else {
805 gen_op_fmov_frN_FT0(FREG(B7_4));
806 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
807 gen_op_stfl_FT0_T1(ctx);
809 return;
810 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
811 if (ctx->fpscr & FPSCR_SZ) {
812 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
813 gen_op_ldfq_T0_DT0(ctx);
814 gen_op_fmov_DT0_drN(XREG(B11_8));
815 } else {
816 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
817 gen_op_ldfl_T0_FT0(ctx);
818 gen_op_fmov_FT0_frN(FREG(B11_8));
820 return;
821 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
822 if (ctx->fpscr & FPSCR_SZ) {
823 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
824 gen_op_ldfq_T0_DT0(ctx);
825 gen_op_fmov_DT0_drN(XREG(B11_8));
826 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
827 cpu_gregs[REG(B7_4)], 8);
828 } else {
829 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
830 gen_op_ldfl_T0_FT0(ctx);
831 gen_op_fmov_FT0_frN(FREG(B11_8));
832 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
833 cpu_gregs[REG(B7_4)], 4);
835 return;
836 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
837 if (ctx->fpscr & FPSCR_SZ) {
838 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
839 gen_op_fmov_drN_DT0(XREG(B7_4));
840 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
841 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
842 gen_op_stfq_DT0_T1(ctx);
843 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
844 } else {
845 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
846 gen_op_fmov_frN_FT0(FREG(B7_4));
847 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
848 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
849 gen_op_stfl_FT0_T1(ctx);
850 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
852 return;
853 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
854 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
855 if (ctx->fpscr & FPSCR_SZ) {
856 gen_op_ldfq_T0_DT0(ctx);
857 gen_op_fmov_DT0_drN(XREG(B11_8));
858 } else {
859 gen_op_ldfl_T0_FT0(ctx);
860 gen_op_fmov_FT0_frN(FREG(B11_8));
862 return;
863 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
864 if (ctx->fpscr & FPSCR_SZ) {
865 gen_op_fmov_drN_DT0(XREG(B7_4));
866 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
867 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
868 gen_op_stfq_DT0_T1(ctx);
869 } else {
870 gen_op_fmov_frN_FT0(FREG(B7_4));
871 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
872 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
873 gen_op_stfl_FT0_T1(ctx);
875 return;
876 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
877 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
878 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
879 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
880 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
881 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
882 if (ctx->fpscr & FPSCR_PR) {
883 if (ctx->opcode & 0x0110)
884 break; /* illegal instruction */
885 gen_op_fmov_drN_DT1(DREG(B7_4));
886 gen_op_fmov_drN_DT0(DREG(B11_8));
888 else {
889 gen_op_fmov_frN_FT1(FREG(B7_4));
890 gen_op_fmov_frN_FT0(FREG(B11_8));
893 switch (ctx->opcode & 0xf00f) {
894 case 0xf000: /* fadd Rm,Rn */
895 ctx->fpscr & FPSCR_PR ? gen_op_fadd_DT() : gen_op_fadd_FT();
896 break;
897 case 0xf001: /* fsub Rm,Rn */
898 ctx->fpscr & FPSCR_PR ? gen_op_fsub_DT() : gen_op_fsub_FT();
899 break;
900 case 0xf002: /* fmul Rm,Rn */
901 ctx->fpscr & FPSCR_PR ? gen_op_fmul_DT() : gen_op_fmul_FT();
902 break;
903 case 0xf003: /* fdiv Rm,Rn */
904 ctx->fpscr & FPSCR_PR ? gen_op_fdiv_DT() : gen_op_fdiv_FT();
905 break;
906 case 0xf004: /* fcmp/eq Rm,Rn */
907 ctx->fpscr & FPSCR_PR ? gen_op_fcmp_eq_DT() : gen_op_fcmp_eq_FT();
908 return;
909 case 0xf005: /* fcmp/gt Rm,Rn */
910 ctx->fpscr & FPSCR_PR ? gen_op_fcmp_gt_DT() : gen_op_fcmp_gt_FT();
911 return;
914 if (ctx->fpscr & FPSCR_PR) {
915 gen_op_fmov_DT0_drN(DREG(B11_8));
917 else {
918 gen_op_fmov_FT0_frN(FREG(B11_8));
920 return;
923 switch (ctx->opcode & 0xff00) {
924 case 0xc900: /* and #imm,R0 */
925 tcg_gen_andi_i32(cpu_gregs[REG(0)], cpu_gregs[REG(0)], B7_0);
926 return;
927 case 0xcd00: /* and.b #imm,@(R0,GBR) */
928 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
929 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
930 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
931 tcg_gen_qemu_ld8u(cpu_T[0], cpu_T[0], ctx->memidx);
932 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], B7_0);
933 tcg_gen_qemu_st8(cpu_T[0], cpu_T[1], ctx->memidx);
934 return;
935 case 0x8b00: /* bf label */
936 CHECK_NOT_DELAY_SLOT
937 gen_conditional_jump(ctx, ctx->pc + 2,
938 ctx->pc + 4 + B7_0s * 2);
939 ctx->bstate = BS_BRANCH;
940 return;
941 case 0x8f00: /* bf/s label */
942 CHECK_NOT_DELAY_SLOT
943 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
944 ctx->flags |= DELAY_SLOT_CONDITIONAL;
945 return;
946 case 0x8900: /* bt label */
947 CHECK_NOT_DELAY_SLOT
948 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
949 ctx->pc + 2);
950 ctx->bstate = BS_BRANCH;
951 return;
952 case 0x8d00: /* bt/s label */
953 CHECK_NOT_DELAY_SLOT
954 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
955 ctx->flags |= DELAY_SLOT_CONDITIONAL;
956 return;
957 case 0x8800: /* cmp/eq #imm,R0 */
958 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
959 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], B7_0s);
960 return;
961 case 0xc400: /* mov.b @(disp,GBR),R0 */
962 gen_op_stc_gbr_T0();
963 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0);
964 tcg_gen_qemu_ld8s(cpu_T[0], cpu_T[0], ctx->memidx);
965 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
966 return;
967 case 0xc500: /* mov.w @(disp,GBR),R0 */
968 gen_op_stc_gbr_T0();
969 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 2);
970 tcg_gen_qemu_ld16s(cpu_T[0], cpu_T[0], ctx->memidx);
971 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
972 return;
973 case 0xc600: /* mov.l @(disp,GBR),R0 */
974 gen_op_stc_gbr_T0();
975 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 4);
976 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
977 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
978 return;
979 case 0xc000: /* mov.b R0,@(disp,GBR) */
980 gen_op_stc_gbr_T0();
981 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0);
982 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
983 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
984 tcg_gen_qemu_st8(cpu_T[0], cpu_T[1], ctx->memidx);
985 return;
986 case 0xc100: /* mov.w R0,@(disp,GBR) */
987 gen_op_stc_gbr_T0();
988 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 2);
989 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
990 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
991 tcg_gen_qemu_st16(cpu_T[0], cpu_T[1], ctx->memidx);
992 return;
993 case 0xc200: /* mov.l R0,@(disp,GBR) */
994 gen_op_stc_gbr_T0();
995 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 4);
996 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
997 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
998 tcg_gen_qemu_st32(cpu_T[0], cpu_T[1], ctx->memidx);
999 return;
1000 case 0x8000: /* mov.b R0,@(disp,Rn) */
1001 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1002 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B7_4)]);
1003 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], B3_0);
1004 tcg_gen_qemu_st8(cpu_T[0], cpu_T[1], ctx->memidx);
1005 return;
1006 case 0x8100: /* mov.w R0,@(disp,Rn) */
1007 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1008 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B7_4)]);
1009 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], B3_0 * 2);
1010 tcg_gen_qemu_st16(cpu_T[0], cpu_T[1], ctx->memidx);
1011 return;
1012 case 0x8400: /* mov.b @(disp,Rn),R0 */
1013 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
1014 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B3_0);
1015 tcg_gen_qemu_ld8s(cpu_T[0], cpu_T[0], ctx->memidx);
1016 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
1017 return;
1018 case 0x8500: /* mov.w @(disp,Rn),R0 */
1019 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
1020 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B3_0 * 2);
1021 tcg_gen_qemu_ld16s(cpu_T[0], cpu_T[0], ctx->memidx);
1022 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
1023 return;
1024 case 0xc700: /* mova @(disp,PC),R0 */
1025 tcg_gen_movi_i32(cpu_gregs[REG(0)],
1026 ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1027 return;
1028 case 0xcb00: /* or #imm,R0 */
1029 tcg_gen_ori_i32(cpu_gregs[REG(0)], cpu_gregs[REG(0)], B7_0);
1030 return;
1031 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1032 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1033 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
1034 tcg_gen_mov_i32(cpu_T[0], cpu_T[1]);
1035 tcg_gen_qemu_ld8u(cpu_T[0], cpu_T[0], ctx->memidx);
1036 tcg_gen_ori_i32(cpu_T[0], cpu_T[0], B7_0);
1037 tcg_gen_qemu_st8(cpu_T[0], cpu_T[1], ctx->memidx);
1038 return;
1039 case 0xc300: /* trapa #imm */
1040 CHECK_NOT_DELAY_SLOT
1041 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1042 tcg_gen_movi_i32(cpu_T[0], B7_0);
1043 tcg_gen_helper_0_1(helper_trapa, cpu_T[0]);
1044 ctx->bstate = BS_BRANCH;
1045 return;
1046 case 0xc800: /* tst #imm,R0 */
1047 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(0)], B7_0);
1048 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
1049 return;
1050 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1051 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1052 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
1053 tcg_gen_qemu_ld8u(cpu_T[0], cpu_T[0], ctx->memidx);
1054 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], B7_0);
1055 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
1056 return;
1057 case 0xca00: /* xor #imm,R0 */
1058 tcg_gen_xori_i32(cpu_gregs[REG(0)], cpu_gregs[REG(0)], B7_0);
1059 return;
1060 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1061 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1062 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
1063 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1064 tcg_gen_qemu_ld8u(cpu_T[0], cpu_T[0], ctx->memidx);
1065 tcg_gen_xori_i32(cpu_T[0], cpu_T[0], B7_0);
1066 tcg_gen_qemu_st8(cpu_T[0], cpu_T[1], ctx->memidx);
1067 return;
1070 switch (ctx->opcode & 0xf08f) {
1071 case 0x408e: /* ldc Rm,Rn_BANK */
1072 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1073 tcg_gen_mov_i32(cpu_gregs[ALTREG(B6_4)], cpu_T[0]);
1074 return;
1075 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1076 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1077 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
1078 tcg_gen_mov_i32(cpu_gregs[ALTREG(B6_4)], cpu_T[0]);
1079 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1080 return;
1081 case 0x0082: /* stc Rm_BANK,Rn */
1082 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[ALTREG(B6_4)]);
1083 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
1084 return;
1085 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1086 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1087 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
1088 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[ALTREG(B6_4)]);
1089 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1090 tcg_gen_qemu_st32(cpu_T[0], cpu_T[1], ctx->memidx);
1091 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1092 return;
1095 switch (ctx->opcode & 0xf0ff) {
1096 case 0x0023: /* braf Rn */
1097 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1098 tcg_gen_addi_i32(cpu_delayed_pc, cpu_T[0], ctx->pc + 4);
1099 ctx->flags |= DELAY_SLOT;
1100 ctx->delayed_pc = (uint32_t) - 1;
1101 return;
1102 case 0x0003: /* bsrf Rn */
1103 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1104 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1105 tcg_gen_add_i32(cpu_delayed_pc, cpu_T[0], cpu_pr);
1106 ctx->flags |= DELAY_SLOT;
1107 ctx->delayed_pc = (uint32_t) - 1;
1108 return;
1109 case 0x4015: /* cmp/pl Rn */
1110 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1111 gen_cmp_imm(TCG_COND_GT, cpu_T[0], 0);
1112 return;
1113 case 0x4011: /* cmp/pz Rn */
1114 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1115 gen_cmp_imm(TCG_COND_GE, cpu_T[0], 0);
1116 return;
1117 case 0x4010: /* dt Rn */
1118 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 1);
1119 gen_cmp_imm(TCG_COND_EQ, cpu_gregs[REG(B11_8)], 0);
1120 return;
1121 case 0x402b: /* jmp @Rn */
1122 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1123 tcg_gen_mov_i32(cpu_delayed_pc, cpu_T[0]);
1124 ctx->flags |= DELAY_SLOT;
1125 ctx->delayed_pc = (uint32_t) - 1;
1126 return;
1127 case 0x400b: /* jsr @Rn */
1128 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1129 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1130 tcg_gen_mov_i32(cpu_delayed_pc, cpu_T[0]);
1131 ctx->flags |= DELAY_SLOT;
1132 ctx->delayed_pc = (uint32_t) - 1;
1133 return;
1134 #define LDST(reg,ldnum,ldpnum,ldop,stnum,stpnum,stop,extrald) \
1135 case ldnum: \
1136 tcg_gen_mov_i32 (cpu_T[0], cpu_gregs[REG(B11_8)]); \
1137 gen_op_##ldop##_T0_##reg (); \
1138 extrald \
1139 return; \
1140 case ldpnum: \
1141 tcg_gen_qemu_ld32s (cpu_T[0], cpu_gregs[REG(B11_8)], ctx->memidx); \
1142 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], \
1143 cpu_gregs[REG(B11_8)], 4); \
1144 gen_op_##ldop##_T0_##reg (); \
1145 extrald \
1146 return; \
1147 case stnum: \
1148 gen_op_##stop##_##reg##_T0 (); \
1149 tcg_gen_mov_i32 (cpu_gregs[REG(B11_8)], cpu_T[0]); \
1150 return; \
1151 case stpnum: \
1152 gen_op_##stop##_##reg##_T0 (); \
1153 tcg_gen_subi_i32(cpu_T[1], cpu_gregs[REG(B11_8)], 4); \
1154 tcg_gen_qemu_st32 (cpu_T[0], cpu_T[1], ctx->memidx); \
1155 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], \
1156 cpu_gregs[REG(B11_8)], 4); \
1157 return;
1158 LDST(sr, 0x400e, 0x4007, ldc, 0x0002, 0x4003, stc, ctx->bstate =
1159 BS_STOP;)
1160 LDST(gbr, 0x401e, 0x4017, ldc, 0x0012, 0x4013, stc,)
1161 LDST(vbr, 0x402e, 0x4027, ldc, 0x0022, 0x4023, stc,)
1162 LDST(ssr, 0x403e, 0x4037, ldc, 0x0032, 0x4033, stc,)
1163 LDST(spc, 0x404e, 0x4047, ldc, 0x0042, 0x4043, stc,)
1164 LDST(dbr, 0x40fa, 0x40f6, ldc, 0x00fa, 0x40f2, stc,)
1165 LDST(mach, 0x400a, 0x4006, lds, 0x000a, 0x4002, sts,)
1166 LDST(macl, 0x401a, 0x4016, lds, 0x001a, 0x4012, sts,)
1167 LDST(pr, 0x402a, 0x4026, lds, 0x002a, 0x4022, sts,)
1168 LDST(fpul, 0x405a, 0x4056, lds, 0x005a, 0x4052, sts,)
1169 LDST(fpscr, 0x406a, 0x4066, lds, 0x006a, 0x4062, sts, ctx->bstate =
1170 BS_STOP;)
1171 case 0x00c3: /* movca.l R0,@Rm */
1172 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1173 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
1174 tcg_gen_qemu_st32(cpu_T[0], cpu_T[1], ctx->memidx);
1175 return;
1176 case 0x0029: /* movt Rn */
1177 tcg_gen_andi_i32(cpu_gregs[REG(B11_8)], cpu_sr, SR_T);
1178 return;
1179 case 0x0093: /* ocbi @Rn */
1180 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1181 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
1182 return;
1183 case 0x00a3: /* ocbp @Rn */
1184 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1185 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
1186 return;
1187 case 0x00b3: /* ocbwb @Rn */
1188 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1189 tcg_gen_qemu_ld32s(cpu_T[0], cpu_T[0], ctx->memidx);
1190 return;
1191 case 0x0083: /* pref @Rn */
1192 return;
1193 case 0x4024: /* rotcl Rn */
1194 gen_op_rotcl_Rn(REG(B11_8));
1195 return;
1196 case 0x4025: /* rotcr Rn */
1197 gen_op_rotcr_Rn(REG(B11_8));
1198 return;
1199 case 0x4004: /* rotl Rn */
1200 gen_op_rotl_Rn(REG(B11_8));
1201 return;
1202 case 0x4005: /* rotr Rn */
1203 gen_op_rotr_Rn(REG(B11_8));
1204 return;
1205 case 0x4000: /* shll Rn */
1206 case 0x4020: /* shal Rn */
1207 gen_op_shal_Rn(REG(B11_8));
1208 return;
1209 case 0x4021: /* shar Rn */
1210 gen_op_shar_Rn(REG(B11_8));
1211 return;
1212 case 0x4001: /* shlr Rn */
1213 gen_op_shlr_Rn(REG(B11_8));
1214 return;
1215 case 0x4008: /* shll2 Rn */
1216 tcg_gen_shli_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 2);
1217 return;
1218 case 0x4018: /* shll8 Rn */
1219 tcg_gen_shli_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
1220 return;
1221 case 0x4028: /* shll16 Rn */
1222 tcg_gen_shli_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 16);
1223 return;
1224 case 0x4009: /* shlr2 Rn */
1225 tcg_gen_shri_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 2);
1226 return;
1227 case 0x4019: /* shlr8 Rn */
1228 tcg_gen_shri_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
1229 return;
1230 case 0x4029: /* shlr16 Rn */
1231 tcg_gen_shri_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 16);
1232 return;
1233 case 0x401b: /* tas.b @Rn */
1234 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1235 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1236 tcg_gen_qemu_ld8u(cpu_T[0], cpu_T[0], ctx->memidx);
1237 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
1238 tcg_gen_ori_i32(cpu_T[0], cpu_T[0], 0x80);
1239 tcg_gen_qemu_st8(cpu_T[0], cpu_T[1], ctx->memidx);
1240 return;
1241 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1242 gen_op_movl_fpul_FT0();
1243 gen_op_fmov_FT0_frN(FREG(B11_8));
1244 return;
1245 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1246 gen_op_fmov_frN_FT0(FREG(B11_8));
1247 gen_op_movl_FT0_fpul();
1248 return;
1249 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1250 if (ctx->fpscr & FPSCR_PR) {
1251 if (ctx->opcode & 0x0100)
1252 break; /* illegal instruction */
1253 gen_op_float_DT();
1254 gen_op_fmov_DT0_drN(DREG(B11_8));
1256 else {
1257 gen_op_float_FT();
1258 gen_op_fmov_FT0_frN(FREG(B11_8));
1260 return;
1261 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1262 if (ctx->fpscr & FPSCR_PR) {
1263 if (ctx->opcode & 0x0100)
1264 break; /* illegal instruction */
1265 gen_op_fmov_drN_DT0(DREG(B11_8));
1266 gen_op_ftrc_DT();
1268 else {
1269 gen_op_fmov_frN_FT0(FREG(B11_8));
1270 gen_op_ftrc_FT();
1272 return;
1273 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1274 gen_op_fneg_frN(FREG(B11_8));
1275 return;
1276 case 0xf05d: /* fabs FRn/DRn */
1277 if (ctx->fpscr & FPSCR_PR) {
1278 if (ctx->opcode & 0x0100)
1279 break; /* illegal instruction */
1280 gen_op_fmov_drN_DT0(DREG(B11_8));
1281 gen_op_fabs_DT();
1282 gen_op_fmov_DT0_drN(DREG(B11_8));
1283 } else {
1284 gen_op_fmov_frN_FT0(FREG(B11_8));
1285 gen_op_fabs_FT();
1286 gen_op_fmov_FT0_frN(FREG(B11_8));
1288 return;
1289 case 0xf06d: /* fsqrt FRn */
1290 if (ctx->fpscr & FPSCR_PR) {
1291 if (ctx->opcode & 0x0100)
1292 break; /* illegal instruction */
1293 gen_op_fmov_drN_DT0(FREG(B11_8));
1294 gen_op_fsqrt_DT();
1295 gen_op_fmov_DT0_drN(FREG(B11_8));
1296 } else {
1297 gen_op_fmov_frN_FT0(FREG(B11_8));
1298 gen_op_fsqrt_FT();
1299 gen_op_fmov_FT0_frN(FREG(B11_8));
1301 return;
1302 case 0xf07d: /* fsrra FRn */
1303 break;
1304 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1305 if (!(ctx->fpscr & FPSCR_PR)) {
1306 tcg_gen_movi_i32(cpu_T[0], 0);
1307 gen_op_fmov_T0_frN(FREG(B11_8));
1308 return;
1310 break;
1311 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1312 if (!(ctx->fpscr & FPSCR_PR)) {
1313 tcg_gen_movi_i32(cpu_T[0], 0x3f800000);
1314 gen_op_fmov_T0_frN(FREG(B11_8));
1315 return;
1317 break;
1318 case 0xf0ad: /* fcnvsd FPUL,DRn */
1319 gen_op_movl_fpul_FT0();
1320 gen_op_fcnvsd_FT_DT();
1321 gen_op_fmov_DT0_drN(DREG(B11_8));
1322 return;
1323 case 0xf0bd: /* fcnvds DRn,FPUL */
1324 gen_op_fmov_drN_DT0(DREG(B11_8));
1325 gen_op_fcnvds_DT_FT();
1326 gen_op_movl_FT0_fpul();
1327 return;
1330 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1331 ctx->opcode, ctx->pc);
1332 tcg_gen_helper_0_0(helper_raise_illegal_instruction);
1333 ctx->bstate = BS_EXCP;
/* Decode one instruction and emit the delay-slot bookkeeping around it.
 * If the PREVIOUS instruction was a delayed branch, this instruction is
 * its delay slot: after translating it we clear the in-slot flags and
 * emit the pending (possibly conditional) jump.  If THIS instruction is
 * itself a delayed branch, persist the slot flags so an exception taken
 * inside the slot can restore them.  */
void decode_opc(DisasContext * ctx)
    uint32_t old_flags = ctx->flags;
    _decode_opc(ctx);
    /* We were in a delay slot: the branch is now due.  */
    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
	if (ctx->flags & DELAY_SLOT_CLEARME) {
	    gen_store_flags(0);
	} else {
	    /* go out of the delay slot */
	    uint32_t new_flags = ctx->flags;
	    new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
	    gen_store_flags(new_flags);
	ctx->flags = 0;
	/* The pending branch ends this TB.  */
	ctx->bstate = BS_BRANCH;
	if (old_flags & DELAY_SLOT_CONDITIONAL) {
	    gen_delayed_conditional_jump(ctx);
	} else if (old_flags & DELAY_SLOT) {
	    gen_jump(ctx);
    /* go into a delay slot */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
	gen_store_flags(ctx->flags);
/* Translate a block of guest code into TCG ops.
 * If search_pc is set, also record per-op guest PC / flags / icount in the
 * gen_opc_* side tables so a host PC can later be mapped back to a guest
 * PC (see gen_pc_load).  Translation stops at a branch/exception, a page
 * boundary, the instruction budget, or single-step.  */
static inline void
gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
                               int search_pc)
    DisasContext ctx;
    target_ulong pc_start;
    /* NOTE(review): `static` here makes this pointer shared across calls;
       it is reassigned on every entry so it is harmless single-threaded,
       but it looks unintentional — presumably should be a plain local.
       TODO confirm and drop the `static`.  */
    static uint16_t *gen_opc_end;
    int i, ii;
    int num_insns;
    int max_insns;
    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    /* Seed the translation context from the CPU state captured in the TB.  */
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    ctx.sr = env->sr;
    ctx.fpscr = env->fpscr;
    /* Memory index 1 = privileged (MD set), 0 = user.  */
    ctx.memidx = (env->sr & SR_MD) ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = env->singlestep_enabled;
#ifdef DEBUG_DISAS
    if (loglevel & CPU_LOG_TB_CPU) {
	fprintf(logfile,
		"------------------------------------------------\n");
	cpu_dump_state(env, logfile, fprintf, 0);
#endif
    ii = -1;
    num_insns = 0;
    /* 0 means "no explicit budget": use the maximum encodable count.  */
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_icount_start();
    /* Main loop: one guest instruction per iteration.  */
    while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
        if (env->nb_breakpoints > 0) {
            for (i = 0; i < env->nb_breakpoints; i++) {
                if (ctx.pc == env->breakpoints[i]) {
		    /* We have hit a breakpoint - make sure PC is up-to-date */
		    tcg_gen_movi_i32(cpu_pc, ctx.pc);
		    tcg_gen_helper_0_0(helper_debug);
		    ctx.bstate = BS_EXCP;
		    break;
        if (search_pc) {
            /* Record the guest PC/flags for the ops emitted for this insn;
               pad any skipped op slots with 0 first.  */
            i = gen_opc_ptr - gen_opc_buf;
            if (ii < i) {
                ii++;
                while (ii < i)
                    gen_opc_instr_start[ii++] = 0;
            gen_opc_pc[ii] = ctx.pc;
            gen_opc_hflags[ii] = ctx.flags;
            gen_opc_instr_start[ii] = 1;
            gen_opc_icount[ii] = num_insns;
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
#if 0
	fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
	fflush(stderr);
#endif
	ctx.opcode = lduw_code(ctx.pc);
	decode_opc(&ctx);
        num_insns++;
	ctx.pc += 2;
	/* Stop at a page crossing so the TB stays within one guest page.  */
	if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
	    break;
	if (env->singlestep_enabled)
	    break;
        if (num_insns >= max_insns)
            break;
#ifdef SH4_SINGLE_STEP
	break;
#endif
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (env->singlestep_enabled) {
        tcg_gen_helper_0_0(helper_debug);
    } else {
	/* Emit the TB epilogue according to why translation stopped.  */
	switch (ctx.bstate) {
        case BS_STOP:
            /* gen_op_interrupt_restart(); */
            /* fall through */
        case BS_NONE:
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* gen_op_interrupt_restart(); */
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            break;
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the tail of the side tables.  */
        i = gen_opc_ptr - gen_opc_buf;
        ii++;
        while (ii <= i)
            gen_opc_instr_start[ii++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
#ifdef DEBUG_DISAS
#ifdef SH4_DEBUG_DISAS
    if (loglevel & CPU_LOG_TB_IN_ASM)
	fprintf(logfile, "\n");
#endif
    if (loglevel & CPU_LOG_TB_IN_ASM) {
	fprintf(logfile, "IN:\n");	/* , lookup_symbol(pc_start)); */
	target_disas(logfile, pc_start, ctx.pc - pc_start, 0);
	fprintf(logfile, "\n");
#endif
1499 void gen_intermediate_code(CPUState * env, struct TranslationBlock *tb)
1501 gen_intermediate_code_internal(env, tb, 0);
1504 void gen_intermediate_code_pc(CPUState * env, struct TranslationBlock *tb)
1506 gen_intermediate_code_internal(env, tb, 1);
/* Restore guest PC and translator flags for op index @pc_pos, using the
 * side tables filled in by gen_intermediate_code_internal(search_pc=1).
 * searched_pc and puc are unused on this target.  */
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                unsigned long searched_pc, int pc_pos, void *puc)
    env->pc = gen_opc_pc[pc_pos];
    env->flags = gen_opc_hflags[pc_pos];