Enable -Werror by default for git builds on Linux hosts
[qemu.git] / target-sh4 / translate.c
blob1f461b7a4bbf770282d594a5dacbdfc083cae7ae
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
26 #define DEBUG_DISAS
27 #define SH4_DEBUG_DISAS
28 //#define SH4_SINGLE_STEP
30 #include "cpu.h"
31 #include "exec-all.h"
32 #include "disas.h"
33 #include "tcg-op.h"
34 #include "qemu-common.h"
36 #include "helper.h"
37 #define GEN_HELPER 1
38 #include "helper.h"
/* Per-translation-block decoder state.  One instance is live while a
   single TranslationBlock is being translated. */
40 typedef struct DisasContext {
41 struct TranslationBlock *tb;   /* TB currently being generated */
42 target_ulong pc;               /* guest PC of the instruction being decoded */
43 uint32_t sr;                   /* SR snapshot; drives IS_USER() and REG() bank selection */
44 uint32_t fpscr;                /* FPSCR snapshot; drives FREG()/XREG() bank and SZ checks */
45 uint16_t opcode;               /* raw 16-bit SH4 opcode being decoded */
46 uint32_t flags;                /* TB flags, e.g. DELAY_SLOT / DELAY_SLOT_CONDITIONAL */
47 int bstate;                    /* block termination state, one of BS_* below */
48 int memidx;                    /* MMU index passed to tcg_gen_qemu_ld/st */
49 uint32_t delayed_pc;           /* static delayed-branch target, or (uint32_t)-1 if dynamic */
50 int singlestep_enabled;        /* nonzero when the debugger requested single-step */
51 uint32_t features;             /* CPU feature bits (SH_FEATURE_*) copied from env */
52 int has_movcal;                /* nonzero while a movca.l backup may still be pending */
53 } DisasContext;
/* IS_USER(ctx): nonzero when translating user-mode code.  The
   user-only build is always user mode; otherwise user mode means
   SR.MD is clear in the snapshot taken at TB start. */
55 #if defined(CONFIG_USER_ONLY)
56 #define IS_USER(ctx) 1
57 #else
58 #define IS_USER(ctx) (!(ctx->sr & SR_MD))
59 #endif
/* Values for DisasContext.bstate: why/how translation of the block ends. */
61 enum {
62 BS_NONE = 0, /* We go out of the TB without reaching a branch or an
63 * exception condition
65 BS_STOP = 1, /* We want to stop translation for any reason */
66 BS_BRANCH = 2, /* We reached a branch condition */
67 BS_EXCP = 3, /* We reached an exception condition */
/* TCG global variables mirroring CPUState fields; created once by
   sh4_translate_init() and referenced by all generated code. */
70 /* global register indexes */
71 static TCGv_ptr cpu_env;
72 static TCGv cpu_gregs[24];     /* R0-R15 plus both R0-R7 banks */
73 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
74 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
75 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
76 static TCGv cpu_fregs[32];     /* FPR0-15 in both FPSCR.FR banks */
78 /* internal register indexes */
79 static TCGv cpu_flags, cpu_delayed_pc;
81 #include "gen-icount.h"
/* Create the TCG globals for every architectural register and register
   the translation helpers.  Called from cpu_sh4_init(); idempotent via
   the done_init guard so creating additional CPUs is safe. */
83 static void sh4_translate_init(void)
85 int i;
86 static int done_init = 0;
87 static const char * const gregnames[24] = {
88 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
89 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
90 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
91 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
92 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
94 static const char * const fregnames[32] = {
95 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
96 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
97 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
98 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
99 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
100 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
101 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
102 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    /* Only initialize the globals once, no matter how many CPUs exist. */
105 if (done_init)
106 return;
108 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    /* General registers: 16 visible plus the 8 shadow-bank registers. */
110 for (i = 0; i < 24; i++)
111 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
112 offsetof(CPUState, gregs[i]),
113 gregnames[i]);
    /* Control/system registers, each mapped onto its CPUState field. */
115 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
116 offsetof(CPUState, pc), "PC");
117 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUState, sr), "SR");
119 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUState, ssr), "SSR");
121 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, spc), "SPC");
123 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUState, gbr), "GBR");
125 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUState, vbr), "VBR");
127 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUState, sgr), "SGR");
129 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
130 offsetof(CPUState, dbr), "DBR");
131 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
132 offsetof(CPUState, mach), "MACH");
133 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
134 offsetof(CPUState, macl), "MACL");
135 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
136 offsetof(CPUState, pr), "PR");
137 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
138 offsetof(CPUState, fpscr), "FPSCR");
139 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
140 offsetof(CPUState, fpul), "FPUL");
    /* Internal (non-architectural) translation state. */
142 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
143 offsetof(CPUState, flags), "_flags_");
144 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
145 offsetof(CPUState, delayed_pc),
146 "_delayed_pc_");
147 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
148 offsetof(CPUState, ldst), "_ldst_");
    /* Floating point registers, both banks. */
150 for (i = 0; i < 32; i++)
151 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
152 offsetof(CPUState, fregs[i]),
153 fregnames[i]);
155 /* register helpers */
156 #define GEN_HELPER 2
157 #include "helper.h"
159 done_init = 1;
/* Dump the architectural state of env to f using cpu_fprintf: control
   registers, all 24 general registers (four per line), and the current
   delay-slot status. */
162 void cpu_dump_state(CPUState * env, FILE * f,
163 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
164 int flags)
166 int i;
167 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
168 env->pc, env->sr, env->pr, env->fpscr);
169 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
170 env->spc, env->ssr, env->gbr, env->vbr);
171 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
172 env->sgr, env->dbr, env->delayed_pc, env->fpul);
    /* 24 registers: R0-R15 plus the inactive bank of R0-R7. */
173 for (i = 0; i < 24; i += 4) {
174 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
175 i, env->gregs[i], i + 1, env->gregs[i + 1],
176 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
178 if (env->flags & DELAY_SLOT) {
179 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
180 env->delayed_pc);
181 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
182 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
183 env->delayed_pc);
/* Reset env to SH4 power-on state.  The user-only build gets a
   userspace-friendly SR/FPSCR; the system build uses the manual's
   reset values (privileged mode, interrupts masked, PC at the
   non-cached mirror of the boot vector). */
187 static void cpu_sh4_reset(CPUSH4State * env)
189 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
190 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
191 log_cpu_state(env, 0);
194 #if defined(CONFIG_USER_ONLY)
195 env->sr = 0;
196 #else
197 env->sr = SR_MD | SR_RB | SR_BL | SR_I3 | SR_I2 | SR_I1 | SR_I0;
198 #endif
199 env->vbr = 0;
200 env->pc = 0xA0000000;   /* reset vector in the P2 (uncached) area */
201 #if defined(CONFIG_USER_ONLY)
202 env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
203 set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
204 #else
205 env->fpscr = 0x00040001; /* CPU reset value according to SH4 manual */
206 set_float_rounding_mode(float_round_to_zero, &env->fp_status);
207 #endif
208 env->mmucr = 0;
/* Description of one supported SH4 CPU model. */
211 typedef struct {
212 const char *name;    /* model name matched by -cpu (case-insensitive) */
213 int id;              /* internal SH_CPU_* identifier */
214 uint32_t pvr;        /* processor version register reset value */
215 uint32_t prr;        /* product register reset value */
216 uint32_t cvr;        /* cache version register reset value */
217 uint32_t features;   /* SH_FEATURE_* capability bits */
218 } sh4_def_t;
220 static sh4_def_t sh4_defs[] = {
222 .name = "SH7750R",
223 .id = SH_CPU_SH7750R,
224 .pvr = 0x00050000,
225 .prr = 0x00000100,
226 .cvr = 0x00110000,
227 .features = SH_FEATURE_BCR3_AND_BCR4,
228 }, {
229 .name = "SH7751R",
230 .id = SH_CPU_SH7751R,
231 .pvr = 0x04050005,
232 .prr = 0x00000113,
233 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
234 .features = SH_FEATURE_BCR3_AND_BCR4,
235 }, {
236 .name = "SH7785",
237 .id = SH_CPU_SH7785,
238 .pvr = 0x10300700,
239 .prr = 0x00000200,
240 .cvr = 0x71440211,
241 .features = SH_FEATURE_SH4A,
245 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
247 int i;
249 if (strcasecmp(name, "any") == 0)
250 return &sh4_defs[0];
252 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
253 if (strcasecmp(name, sh4_defs[i].name) == 0)
254 return &sh4_defs[i];
256 return NULL;
259 void sh4_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
261 int i;
263 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
264 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
267 static void cpu_sh4_register(CPUSH4State *env, const sh4_def_t *def)
269 env->pvr = def->pvr;
270 env->prr = def->prr;
271 env->cvr = def->cvr;
272 env->id = def->id;
/* Allocate and fully initialize a CPU for the model named cpu_model.
   Returns NULL when the model name is unknown; otherwise returns a
   reset, registered, ready-to-run CPU state. */
275 CPUSH4State *cpu_sh4_init(const char *cpu_model)
277 CPUSH4State *env;
278 const sh4_def_t *def;
280 def = cpu_sh4_find_by_name(cpu_model);
281 if (!def)
282 return NULL;
283 env = qemu_mallocz(sizeof(CPUSH4State));
284 env->features = def->features;
285 cpu_exec_init(env);
286 env->movcal_backup_tail = &(env->movcal_backup);
287 sh4_translate_init();
288 env->cpu_model_str = cpu_model;
    /* NOTE(review): reset runs before the model registers (pvr/prr/cvr)
       are applied by cpu_sh4_register() — confirm reset does not depend
       on them. */
289 cpu_sh4_reset(env);
290 cpu_sh4_register(env, def);
291 tlb_flush(env, 1);
292 qemu_init_vcpu(env);
293 return env;
/* Emit a jump to guest address dest, chaining to TB slot n (0 or 1)
   when the target lies in the same guest page and single-stepping is
   off; otherwise fall back to a plain PC update and TB exit (raising
   a debug exception first if single-stepping). */
296 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
298 TranslationBlock *tb;
299 tb = ctx->tb;
301 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
302 !ctx->singlestep_enabled) {
303 /* Use a direct jump if in same page and singlestep not enabled */
304 tcg_gen_goto_tb(n);
305 tcg_gen_movi_i32(cpu_pc, dest);
306 tcg_gen_exit_tb((long) tb + n);
307 } else {
308 tcg_gen_movi_i32(cpu_pc, dest);
309 if (ctx->singlestep_enabled)
310 gen_helper_debug();
311 tcg_gen_exit_tb(0);
/* Emit the final jump for the current block.  delayed_pc == -1 means
   the target is only known at run time (it came from a delayed branch
   register), so load PC from cpu_delayed_pc and exit; a static target
   goes through gen_goto_tb() for possible TB chaining. */
315 static void gen_jump(DisasContext * ctx)
317 if (ctx->delayed_pc == (uint32_t) - 1) {
318 /* Target is not statically known, it comes necessarily from a
319 delayed jump as immediate jump are conditinal jumps */
320 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
321 if (ctx->singlestep_enabled)
322 gen_helper_debug();
323 tcg_gen_exit_tb(0);
324 } else {
325 gen_goto_tb(ctx, 0, ctx->delayed_pc);
/* Prepare state for a conditional delayed branch: record the static
   target in cpu_delayed_pc, and set the DELAY_SLOT_TRUE flag iff the
   T bit equals t (the branch-taken sense).  The flag is later tested
   by gen_delayed_conditional_jump(). */
329 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
331 TCGv sr;
332 int label = gen_new_label();
333 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
334 sr = tcg_temp_new();
335 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
    /* Skip setting the flag when T does not match the taken sense. */
336 tcg_gen_brcondi_i32(TCG_COND_NE, sr, t ? SR_T : 0, label);
337 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
338 gen_set_label(label);
341 /* Immediate conditional jump (bt or bf) */
/* Emit a two-way branch on the T bit: jump to ift when T is set,
   otherwise to ifnott.  Uses both TB-chaining slots (0 and 1). */
342 static void gen_conditional_jump(DisasContext * ctx,
343 target_ulong ift, target_ulong ifnott)
345 int l1;
346 TCGv sr;
348 l1 = gen_new_label();
349 sr = tcg_temp_new();
350 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
351 tcg_gen_brcondi_i32(TCG_COND_EQ, sr, SR_T, l1);
352 gen_goto_tb(ctx, 0, ifnott);   /* T clear: fall-through target */
353 gen_set_label(l1);
354 gen_goto_tb(ctx, 1, ift);      /* T set: taken target */
357 /* Delayed conditional jump (bt or bf) */
/* At the end of a conditional delay slot: if DELAY_SLOT_TRUE was set
   by gen_branch_slot(), clear it and take the recorded branch;
   otherwise continue with the next instruction (pc + 2). */
358 static void gen_delayed_conditional_jump(DisasContext * ctx)
360 int l1;
361 TCGv ds;
363 l1 = gen_new_label();
364 ds = tcg_temp_new();
365 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
366 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, DELAY_SLOT_TRUE, l1);
367 gen_goto_tb(ctx, 1, ctx->pc + 2);   /* branch not taken */
368 gen_set_label(l1);
369 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
370 gen_jump(ctx);                      /* branch taken */
/* Set the SR.T flag. */
373 static inline void gen_set_t(void)
375 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
/* Clear the SR.T flag. */
378 static inline void gen_clr_t(void)
380 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
383 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
385 int label1 = gen_new_label();
386 int label2 = gen_new_label();
387 tcg_gen_brcond_i32(cond, t1, t0, label1);
388 gen_clr_t();
389 tcg_gen_br(label2);
390 gen_set_label(label1);
391 gen_set_t();
392 gen_set_label(label2);
395 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
397 int label1 = gen_new_label();
398 int label2 = gen_new_label();
399 tcg_gen_brcondi_i32(cond, t0, imm, label1);
400 gen_clr_t();
401 tcg_gen_br(label2);
402 gen_set_label(label1);
403 gen_set_t();
404 gen_set_label(label2);
/* Replace the TB flags with the given value, preserving only the
   DELAY_SLOT_TRUE bit accumulated at run time. */
407 static inline void gen_store_flags(uint32_t flags)
409 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
410 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
413 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
415 TCGv tmp = tcg_temp_new();
417 p0 &= 0x1f;
418 p1 &= 0x1f;
420 tcg_gen_andi_i32(tmp, t1, (1 << p1));
421 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
422 if (p0 < p1)
423 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
424 else if (p0 > p1)
425 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
426 tcg_gen_or_i32(t0, t0, tmp);
428 tcg_temp_free(tmp);
/* Assemble a 64-bit FP value from the register pair starting at reg:
   fregs[reg] supplies the high 32 bits, fregs[reg + 1] the low. */
431 static inline void gen_load_fpr64(TCGv_i64 t, int reg)
433 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
/* Split a 64-bit FP value into the register pair starting at reg:
   low 32 bits go to fregs[reg + 1], high 32 bits to fregs[reg].
   Note: clobbers t (shifted right by 32). */
436 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
438 TCGv_i32 tmp = tcg_temp_new_i32();
439 tcg_gen_trunc_i64_i32(tmp, t);
440 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
441 tcg_gen_shri_i64(t, t, 32);
442 tcg_gen_trunc_i64_i32(tmp, t);
443 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
444 tcg_temp_free_i32(tmp);
/* Opcode bit-field extractors: Bx_y selects bits x..y of the current
   16-bit opcode; the "s" variants sign-extend the field. */
447 #define B3_0 (ctx->opcode & 0xf)
448 #define B6_4 ((ctx->opcode >> 4) & 0x7)
449 #define B7_4 ((ctx->opcode >> 4) & 0xf)
450 #define B7_0 (ctx->opcode & 0xff)
451 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
452 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
453 (ctx->opcode & 0xfff))
454 #define B11_8 ((ctx->opcode >> 8) & 0xf)
455 #define B15_12 ((ctx->opcode >> 12) & 0xf)
/* REG selects the active bank for R0-R7 (bank 1 when MD and RB are
   both set in SR); ALTREG selects the inactive bank. */
457 #define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
458 (cpu_gregs[x + 16]) : (cpu_gregs[x]))
460 #define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
461 ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
/* FP register selection honouring the FPSCR.FR bank bit; XREG applies
   the XD register renumbering via XHACK first. */
463 #define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
464 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
465 #define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
466 #define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Abort decoding with a slot-illegal exception if the current insn
   sits in a delay slot (such insns are forbidden there). */
468 #define CHECK_NOT_DELAY_SLOT \
469 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
471 tcg_gen_movi_i32(cpu_pc, ctx->pc-2); \
472 gen_helper_raise_slot_illegal_instruction(); \
473 ctx->bstate = BS_EXCP; \
474 return; \
/* Abort decoding with an illegal-instruction exception when executing
   a privileged insn in user mode. */
477 #define CHECK_PRIVILEGED \
478 if (IS_USER(ctx)) { \
479 tcg_gen_movi_i32(cpu_pc, ctx->pc); \
480 gen_helper_raise_illegal_instruction(); \
481 ctx->bstate = BS_EXCP; \
482 return; \
/* Abort decoding with an FPU-disable exception when SR.FD is set;
   the slot variant is raised inside a delay slot. */
485 #define CHECK_FPU_ENABLED \
486 if (ctx->flags & SR_FD) { \
487 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
488 tcg_gen_movi_i32(cpu_pc, ctx->pc-2); \
489 gen_helper_raise_slot_fpu_disable(); \
490 } else { \
491 tcg_gen_movi_i32(cpu_pc, ctx->pc); \
492 gen_helper_raise_fpu_disable(); \
494 ctx->bstate = BS_EXCP; \
495 return; \
498 static void _decode_opc(DisasContext * ctx)
500 /* This code tries to make movcal emulation sufficiently
501 accurate for Linux purposes. This instruction writes
502 memory, and prior to that, always allocates a cache line.
503 It is used in two contexts:
504 - in memcpy, where data is copied in blocks, the first write
505 of to a block uses movca.l for performance.
506 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
507 to flush the cache. Here, the data written by movcal.l is never
508 written to memory, and the data written is just bogus.
510 To simulate this, we simulate movcal.l, we store the value to memory,
511 but we also remember the previous content. If we see ocbi, we check
512 if movcal.l for that address was done previously. If so, the write should
513 not have hit the memory, so we restore the previous content.
514 When we see an instruction that is neither movca.l
515 nor ocbi, the previous content is discarded.
517 To optimize, we only try to flush stores when we're at the start of
518 TB, or if we already saw movca.l in this TB and did not flush stores
519 yet. */
520 if (ctx->has_movcal)
522 int opcode = ctx->opcode & 0xf0ff;
523 if (opcode != 0x0093 /* ocbi */
524 && opcode != 0x00c3 /* movca.l */)
526 gen_helper_discard_movcal_backup ();
527 ctx->has_movcal = 0;
531 #if 0
532 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
533 #endif
535 switch (ctx->opcode) {
536 case 0x0019: /* div0u */
537 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
538 return;
539 case 0x000b: /* rts */
540 CHECK_NOT_DELAY_SLOT
541 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
542 ctx->flags |= DELAY_SLOT;
543 ctx->delayed_pc = (uint32_t) - 1;
544 return;
545 case 0x0028: /* clrmac */
546 tcg_gen_movi_i32(cpu_mach, 0);
547 tcg_gen_movi_i32(cpu_macl, 0);
548 return;
549 case 0x0048: /* clrs */
550 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
551 return;
552 case 0x0008: /* clrt */
553 gen_clr_t();
554 return;
555 case 0x0038: /* ldtlb */
556 CHECK_PRIVILEGED
557 gen_helper_ldtlb();
558 return;
559 case 0x002b: /* rte */
560 CHECK_PRIVILEGED
561 CHECK_NOT_DELAY_SLOT
562 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
563 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
564 ctx->flags |= DELAY_SLOT;
565 ctx->delayed_pc = (uint32_t) - 1;
566 return;
567 case 0x0058: /* sets */
568 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
569 return;
570 case 0x0018: /* sett */
571 gen_set_t();
572 return;
573 case 0xfbfd: /* frchg */
574 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
575 ctx->bstate = BS_STOP;
576 return;
577 case 0xf3fd: /* fschg */
578 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
579 ctx->bstate = BS_STOP;
580 return;
581 case 0x0009: /* nop */
582 return;
583 case 0x001b: /* sleep */
584 CHECK_PRIVILEGED
585 gen_helper_sleep(tcg_const_i32(ctx->pc + 2));
586 return;
589 switch (ctx->opcode & 0xf000) {
590 case 0x1000: /* mov.l Rm,@(disp,Rn) */
592 TCGv addr = tcg_temp_new();
593 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
594 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
595 tcg_temp_free(addr);
597 return;
598 case 0x5000: /* mov.l @(disp,Rm),Rn */
600 TCGv addr = tcg_temp_new();
601 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
602 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
603 tcg_temp_free(addr);
605 return;
606 case 0xe000: /* mov #imm,Rn */
607 tcg_gen_movi_i32(REG(B11_8), B7_0s);
608 return;
609 case 0x9000: /* mov.w @(disp,PC),Rn */
611 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
612 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
613 tcg_temp_free(addr);
615 return;
616 case 0xd000: /* mov.l @(disp,PC),Rn */
618 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
619 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
620 tcg_temp_free(addr);
622 return;
623 case 0x7000: /* add #imm,Rn */
624 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
625 return;
626 case 0xa000: /* bra disp */
627 CHECK_NOT_DELAY_SLOT
628 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
629 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
630 ctx->flags |= DELAY_SLOT;
631 return;
632 case 0xb000: /* bsr disp */
633 CHECK_NOT_DELAY_SLOT
634 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
635 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
636 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
637 ctx->flags |= DELAY_SLOT;
638 return;
641 switch (ctx->opcode & 0xf00f) {
642 case 0x6003: /* mov Rm,Rn */
643 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
644 return;
645 case 0x2000: /* mov.b Rm,@Rn */
646 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
647 return;
648 case 0x2001: /* mov.w Rm,@Rn */
649 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
650 return;
651 case 0x2002: /* mov.l Rm,@Rn */
652 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
653 return;
654 case 0x6000: /* mov.b @Rm,Rn */
655 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
656 return;
657 case 0x6001: /* mov.w @Rm,Rn */
658 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
659 return;
660 case 0x6002: /* mov.l @Rm,Rn */
661 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
662 return;
663 case 0x2004: /* mov.b Rm,@-Rn */
665 TCGv addr = tcg_temp_new();
666 tcg_gen_subi_i32(addr, REG(B11_8), 1);
667 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
668 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1); /* modify register status */
669 tcg_temp_free(addr);
671 return;
672 case 0x2005: /* mov.w Rm,@-Rn */
674 TCGv addr = tcg_temp_new();
675 tcg_gen_subi_i32(addr, REG(B11_8), 2);
676 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
677 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 2);
678 tcg_temp_free(addr);
680 return;
681 case 0x2006: /* mov.l Rm,@-Rn */
683 TCGv addr = tcg_temp_new();
684 tcg_gen_subi_i32(addr, REG(B11_8), 4);
685 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
686 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4);
688 return;
689 case 0x6004: /* mov.b @Rm+,Rn */
690 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
691 if ( B11_8 != B7_4 )
692 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
693 return;
694 case 0x6005: /* mov.w @Rm+,Rn */
695 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
696 if ( B11_8 != B7_4 )
697 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
698 return;
699 case 0x6006: /* mov.l @Rm+,Rn */
700 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
701 if ( B11_8 != B7_4 )
702 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
703 return;
704 case 0x0004: /* mov.b Rm,@(R0,Rn) */
706 TCGv addr = tcg_temp_new();
707 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
708 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
709 tcg_temp_free(addr);
711 return;
712 case 0x0005: /* mov.w Rm,@(R0,Rn) */
714 TCGv addr = tcg_temp_new();
715 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
716 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
717 tcg_temp_free(addr);
719 return;
720 case 0x0006: /* mov.l Rm,@(R0,Rn) */
722 TCGv addr = tcg_temp_new();
723 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
724 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
725 tcg_temp_free(addr);
727 return;
728 case 0x000c: /* mov.b @(R0,Rm),Rn */
730 TCGv addr = tcg_temp_new();
731 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
732 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
733 tcg_temp_free(addr);
735 return;
736 case 0x000d: /* mov.w @(R0,Rm),Rn */
738 TCGv addr = tcg_temp_new();
739 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
740 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
741 tcg_temp_free(addr);
743 return;
744 case 0x000e: /* mov.l @(R0,Rm),Rn */
746 TCGv addr = tcg_temp_new();
747 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
748 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
749 tcg_temp_free(addr);
751 return;
752 case 0x6008: /* swap.b Rm,Rn */
754 TCGv highw, high, low;
755 highw = tcg_temp_new();
756 tcg_gen_andi_i32(highw, REG(B7_4), 0xffff0000);
757 high = tcg_temp_new();
758 tcg_gen_ext8u_i32(high, REG(B7_4));
759 tcg_gen_shli_i32(high, high, 8);
760 low = tcg_temp_new();
761 tcg_gen_shri_i32(low, REG(B7_4), 8);
762 tcg_gen_ext8u_i32(low, low);
763 tcg_gen_or_i32(REG(B11_8), high, low);
764 tcg_gen_or_i32(REG(B11_8), REG(B11_8), highw);
765 tcg_temp_free(low);
766 tcg_temp_free(high);
768 return;
769 case 0x6009: /* swap.w Rm,Rn */
771 TCGv high, low;
772 high = tcg_temp_new();
773 tcg_gen_ext16u_i32(high, REG(B7_4));
774 tcg_gen_shli_i32(high, high, 16);
775 low = tcg_temp_new();
776 tcg_gen_shri_i32(low, REG(B7_4), 16);
777 tcg_gen_ext16u_i32(low, low);
778 tcg_gen_or_i32(REG(B11_8), high, low);
779 tcg_temp_free(low);
780 tcg_temp_free(high);
782 return;
783 case 0x200d: /* xtrct Rm,Rn */
785 TCGv high, low;
786 high = tcg_temp_new();
787 tcg_gen_ext16u_i32(high, REG(B7_4));
788 tcg_gen_shli_i32(high, high, 16);
789 low = tcg_temp_new();
790 tcg_gen_shri_i32(low, REG(B11_8), 16);
791 tcg_gen_ext16u_i32(low, low);
792 tcg_gen_or_i32(REG(B11_8), high, low);
793 tcg_temp_free(low);
794 tcg_temp_free(high);
796 return;
797 case 0x300c: /* add Rm,Rn */
798 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
799 return;
800 case 0x300e: /* addc Rm,Rn */
801 gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8));
802 return;
803 case 0x300f: /* addv Rm,Rn */
804 gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8));
805 return;
806 case 0x2009: /* and Rm,Rn */
807 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
808 return;
809 case 0x3000: /* cmp/eq Rm,Rn */
810 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
811 return;
812 case 0x3003: /* cmp/ge Rm,Rn */
813 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
814 return;
815 case 0x3007: /* cmp/gt Rm,Rn */
816 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
817 return;
818 case 0x3006: /* cmp/hi Rm,Rn */
819 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
820 return;
821 case 0x3002: /* cmp/hs Rm,Rn */
822 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
823 return;
824 case 0x200c: /* cmp/str Rm,Rn */
826 int label1 = gen_new_label();
827 int label2 = gen_new_label();
828 TCGv cmp1 = tcg_temp_local_new();
829 TCGv cmp2 = tcg_temp_local_new();
830 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
831 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
832 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
833 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
834 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
835 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
836 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
837 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
838 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
839 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
840 tcg_gen_br(label2);
841 gen_set_label(label1);
842 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
843 gen_set_label(label2);
844 tcg_temp_free(cmp2);
845 tcg_temp_free(cmp1);
847 return;
848 case 0x2007: /* div0s Rm,Rn */
850 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
851 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
852 TCGv val = tcg_temp_new();
853 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
854 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
855 tcg_temp_free(val);
857 return;
858 case 0x3004: /* div1 Rm,Rn */
859 gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8));
860 return;
861 case 0x300d: /* dmuls.l Rm,Rn */
863 TCGv_i64 tmp1 = tcg_temp_new_i64();
864 TCGv_i64 tmp2 = tcg_temp_new_i64();
866 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
867 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
868 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
869 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
870 tcg_gen_shri_i64(tmp1, tmp1, 32);
871 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
873 tcg_temp_free_i64(tmp2);
874 tcg_temp_free_i64(tmp1);
876 return;
877 case 0x3005: /* dmulu.l Rm,Rn */
879 TCGv_i64 tmp1 = tcg_temp_new_i64();
880 TCGv_i64 tmp2 = tcg_temp_new_i64();
882 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
883 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
884 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
885 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
886 tcg_gen_shri_i64(tmp1, tmp1, 32);
887 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
889 tcg_temp_free_i64(tmp2);
890 tcg_temp_free_i64(tmp1);
892 return;
893 case 0x600e: /* exts.b Rm,Rn */
894 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
895 return;
896 case 0x600f: /* exts.w Rm,Rn */
897 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
898 return;
899 case 0x600c: /* extu.b Rm,Rn */
900 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
901 return;
902 case 0x600d: /* extu.w Rm,Rn */
903 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
904 return;
905 case 0x000f: /* mac.l @Rm+,@Rn+ */
907 TCGv arg0, arg1;
908 arg0 = tcg_temp_new();
909 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
910 arg1 = tcg_temp_new();
911 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
912 gen_helper_macl(arg0, arg1);
913 tcg_temp_free(arg1);
914 tcg_temp_free(arg0);
915 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
916 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
918 return;
919 case 0x400f: /* mac.w @Rm+,@Rn+ */
921 TCGv arg0, arg1;
922 arg0 = tcg_temp_new();
923 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
924 arg1 = tcg_temp_new();
925 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
926 gen_helper_macw(arg0, arg1);
927 tcg_temp_free(arg1);
928 tcg_temp_free(arg0);
929 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
930 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
932 return;
933 case 0x0007: /* mul.l Rm,Rn */
934 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
935 return;
936 case 0x200f: /* muls.w Rm,Rn */
938 TCGv arg0, arg1;
939 arg0 = tcg_temp_new();
940 tcg_gen_ext16s_i32(arg0, REG(B7_4));
941 arg1 = tcg_temp_new();
942 tcg_gen_ext16s_i32(arg1, REG(B11_8));
943 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
944 tcg_temp_free(arg1);
945 tcg_temp_free(arg0);
947 return;
948 case 0x200e: /* mulu.w Rm,Rn */
950 TCGv arg0, arg1;
951 arg0 = tcg_temp_new();
952 tcg_gen_ext16u_i32(arg0, REG(B7_4));
953 arg1 = tcg_temp_new();
954 tcg_gen_ext16u_i32(arg1, REG(B11_8));
955 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
956 tcg_temp_free(arg1);
957 tcg_temp_free(arg0);
959 return;
960 case 0x600b: /* neg Rm,Rn */
961 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
962 return;
963 case 0x600a: /* negc Rm,Rn */
964 gen_helper_negc(REG(B11_8), REG(B7_4));
965 return;
966 case 0x6007: /* not Rm,Rn */
967 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
968 return;
969 case 0x200b: /* or Rm,Rn */
970 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
971 return;
972 case 0x400c: /* shad Rm,Rn */
974 int label1 = gen_new_label();
975 int label2 = gen_new_label();
976 int label3 = gen_new_label();
977 int label4 = gen_new_label();
978 TCGv shift = tcg_temp_local_new();
979 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
980 /* Rm positive, shift to the left */
981 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
982 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
983 tcg_gen_br(label4);
984 /* Rm negative, shift to the right */
985 gen_set_label(label1);
986 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
987 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
988 tcg_gen_not_i32(shift, REG(B7_4));
989 tcg_gen_andi_i32(shift, shift, 0x1f);
990 tcg_gen_addi_i32(shift, shift, 1);
991 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
992 tcg_gen_br(label4);
993 /* Rm = -32 */
994 gen_set_label(label2);
995 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
996 tcg_gen_movi_i32(REG(B11_8), 0);
997 tcg_gen_br(label4);
998 gen_set_label(label3);
999 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
1000 gen_set_label(label4);
1001 tcg_temp_free(shift);
1003 return;
1004 case 0x400d: /* shld Rm,Rn */
1006 int label1 = gen_new_label();
1007 int label2 = gen_new_label();
1008 int label3 = gen_new_label();
1009 TCGv shift = tcg_temp_local_new();
1010 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
1011 /* Rm positive, shift to the left */
1012 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1013 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
1014 tcg_gen_br(label3);
1015 /* Rm negative, shift to the right */
1016 gen_set_label(label1);
1017 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1018 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1019 tcg_gen_not_i32(shift, REG(B7_4));
1020 tcg_gen_andi_i32(shift, shift, 0x1f);
1021 tcg_gen_addi_i32(shift, shift, 1);
1022 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1023 tcg_gen_br(label3);
1024 /* Rm = -32 */
1025 gen_set_label(label2);
1026 tcg_gen_movi_i32(REG(B11_8), 0);
1027 gen_set_label(label3);
1028 tcg_temp_free(shift);
1030 return;
1031 case 0x3008: /* sub Rm,Rn */
1032 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1033 return;
1034 case 0x300a: /* subc Rm,Rn */
1035 gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8));
1036 return;
1037 case 0x300b: /* subv Rm,Rn */
1038 gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8));
1039 return;
1040 case 0x2008: /* tst Rm,Rn */
1042 TCGv val = tcg_temp_new();
1043 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1044 gen_cmp_imm(TCG_COND_EQ, val, 0);
1045 tcg_temp_free(val);
1047 return;
1048 case 0x200a: /* xor Rm,Rn */
1049 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1050 return;
1051 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1052 CHECK_FPU_ENABLED
1053 if (ctx->fpscr & FPSCR_SZ) {
1054 TCGv_i64 fp = tcg_temp_new_i64();
1055 gen_load_fpr64(fp, XREG(B7_4));
1056 gen_store_fpr64(fp, XREG(B11_8));
1057 tcg_temp_free_i64(fp);
1058 } else {
1059 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1061 return;
1062 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1063 CHECK_FPU_ENABLED
1064 if (ctx->fpscr & FPSCR_SZ) {
1065 TCGv addr_hi = tcg_temp_new();
1066 int fr = XREG(B7_4);
1067 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1068 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1069 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1070 tcg_temp_free(addr_hi);
1071 } else {
1072 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1074 return;
1075 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1076 CHECK_FPU_ENABLED
1077 if (ctx->fpscr & FPSCR_SZ) {
1078 TCGv addr_hi = tcg_temp_new();
1079 int fr = XREG(B11_8);
1080 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1081 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1082 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1083 tcg_temp_free(addr_hi);
1084 } else {
1085 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1087 return;
1088 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1089 CHECK_FPU_ENABLED
1090 if (ctx->fpscr & FPSCR_SZ) {
1091 TCGv addr_hi = tcg_temp_new();
1092 int fr = XREG(B11_8);
1093 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1094 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1095 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1096 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1097 tcg_temp_free(addr_hi);
1098 } else {
1099 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1100 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1102 return;
1103 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1104 CHECK_FPU_ENABLED
1105 if (ctx->fpscr & FPSCR_SZ) {
1106 TCGv addr = tcg_temp_new_i32();
1107 int fr = XREG(B7_4);
1108 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1109 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1110 tcg_gen_subi_i32(addr, REG(B11_8), 8);
1111 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1112 tcg_gen_mov_i32(REG(B11_8), addr);
1113 tcg_temp_free(addr);
1114 } else {
1115 TCGv addr;
1116 addr = tcg_temp_new_i32();
1117 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1118 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1119 tcg_temp_free(addr);
1120 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4);
1122 return;
1123 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1124 CHECK_FPU_ENABLED
1126 TCGv addr = tcg_temp_new_i32();
1127 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1128 if (ctx->fpscr & FPSCR_SZ) {
1129 int fr = XREG(B11_8);
1130 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1131 tcg_gen_addi_i32(addr, addr, 4);
1132 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1133 } else {
1134 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1136 tcg_temp_free(addr);
1138 return;
1139 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1140 CHECK_FPU_ENABLED
1142 TCGv addr = tcg_temp_new();
1143 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1144 if (ctx->fpscr & FPSCR_SZ) {
1145 int fr = XREG(B7_4);
1146 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1147 tcg_gen_addi_i32(addr, addr, 4);
1148 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1149 } else {
1150 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1152 tcg_temp_free(addr);
1154 return;
1155 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1156 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1157 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1158 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1159 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1160 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1162 CHECK_FPU_ENABLED
1163 if (ctx->fpscr & FPSCR_PR) {
1164 TCGv_i64 fp0, fp1;
1166 if (ctx->opcode & 0x0110)
1167 break; /* illegal instruction */
1168 fp0 = tcg_temp_new_i64();
1169 fp1 = tcg_temp_new_i64();
1170 gen_load_fpr64(fp0, DREG(B11_8));
1171 gen_load_fpr64(fp1, DREG(B7_4));
1172 switch (ctx->opcode & 0xf00f) {
1173 case 0xf000: /* fadd Rm,Rn */
1174 gen_helper_fadd_DT(fp0, fp0, fp1);
1175 break;
1176 case 0xf001: /* fsub Rm,Rn */
1177 gen_helper_fsub_DT(fp0, fp0, fp1);
1178 break;
1179 case 0xf002: /* fmul Rm,Rn */
1180 gen_helper_fmul_DT(fp0, fp0, fp1);
1181 break;
1182 case 0xf003: /* fdiv Rm,Rn */
1183 gen_helper_fdiv_DT(fp0, fp0, fp1);
1184 break;
1185 case 0xf004: /* fcmp/eq Rm,Rn */
1186 gen_helper_fcmp_eq_DT(fp0, fp1);
1187 return;
1188 case 0xf005: /* fcmp/gt Rm,Rn */
1189 gen_helper_fcmp_gt_DT(fp0, fp1);
1190 return;
1192 gen_store_fpr64(fp0, DREG(B11_8));
1193 tcg_temp_free_i64(fp0);
1194 tcg_temp_free_i64(fp1);
1195 } else {
1196 switch (ctx->opcode & 0xf00f) {
1197 case 0xf000: /* fadd Rm,Rn */
1198 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1199 break;
1200 case 0xf001: /* fsub Rm,Rn */
1201 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1202 break;
1203 case 0xf002: /* fmul Rm,Rn */
1204 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1205 break;
1206 case 0xf003: /* fdiv Rm,Rn */
1207 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1208 break;
1209 case 0xf004: /* fcmp/eq Rm,Rn */
1210 gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1211 return;
1212 case 0xf005: /* fcmp/gt Rm,Rn */
1213 gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1214 return;
1218 return;
1219 case 0xf00e: /* fmac FR0,RM,Rn */
1221 CHECK_FPU_ENABLED
1222 if (ctx->fpscr & FPSCR_PR) {
1223 break; /* illegal instruction */
1224 } else {
1225 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)],
1226 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]);
1227 return;
1232 switch (ctx->opcode & 0xff00) {
1233 case 0xc900: /* and #imm,R0 */
1234 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1235 return;
1236 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1238 TCGv addr, val;
1239 addr = tcg_temp_new();
1240 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1241 val = tcg_temp_new();
1242 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1243 tcg_gen_andi_i32(val, val, B7_0);
1244 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1245 tcg_temp_free(val);
1246 tcg_temp_free(addr);
1248 return;
1249 case 0x8b00: /* bf label */
1250 CHECK_NOT_DELAY_SLOT
1251 gen_conditional_jump(ctx, ctx->pc + 2,
1252 ctx->pc + 4 + B7_0s * 2);
1253 ctx->bstate = BS_BRANCH;
1254 return;
1255 case 0x8f00: /* bf/s label */
1256 CHECK_NOT_DELAY_SLOT
1257 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1258 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1259 return;
1260 case 0x8900: /* bt label */
1261 CHECK_NOT_DELAY_SLOT
1262 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1263 ctx->pc + 2);
1264 ctx->bstate = BS_BRANCH;
1265 return;
1266 case 0x8d00: /* bt/s label */
1267 CHECK_NOT_DELAY_SLOT
1268 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1269 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1270 return;
1271 case 0x8800: /* cmp/eq #imm,R0 */
1272 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1273 return;
1274 case 0xc400: /* mov.b @(disp,GBR),R0 */
1276 TCGv addr = tcg_temp_new();
1277 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1278 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1279 tcg_temp_free(addr);
1281 return;
1282 case 0xc500: /* mov.w @(disp,GBR),R0 */
1284 TCGv addr = tcg_temp_new();
1285 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1286 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1287 tcg_temp_free(addr);
1289 return;
1290 case 0xc600: /* mov.l @(disp,GBR),R0 */
1292 TCGv addr = tcg_temp_new();
1293 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1294 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1295 tcg_temp_free(addr);
1297 return;
1298 case 0xc000: /* mov.b R0,@(disp,GBR) */
1300 TCGv addr = tcg_temp_new();
1301 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1302 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1303 tcg_temp_free(addr);
1305 return;
1306 case 0xc100: /* mov.w R0,@(disp,GBR) */
1308 TCGv addr = tcg_temp_new();
1309 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1310 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1311 tcg_temp_free(addr);
1313 return;
1314 case 0xc200: /* mov.l R0,@(disp,GBR) */
1316 TCGv addr = tcg_temp_new();
1317 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1318 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1319 tcg_temp_free(addr);
1321 return;
1322 case 0x8000: /* mov.b R0,@(disp,Rn) */
1324 TCGv addr = tcg_temp_new();
1325 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1326 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1327 tcg_temp_free(addr);
1329 return;
1330 case 0x8100: /* mov.w R0,@(disp,Rn) */
1332 TCGv addr = tcg_temp_new();
1333 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1334 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1335 tcg_temp_free(addr);
1337 return;
1338 case 0x8400: /* mov.b @(disp,Rn),R0 */
1340 TCGv addr = tcg_temp_new();
1341 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1342 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1343 tcg_temp_free(addr);
1345 return;
1346 case 0x8500: /* mov.w @(disp,Rn),R0 */
1348 TCGv addr = tcg_temp_new();
1349 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1350 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1351 tcg_temp_free(addr);
1353 return;
1354 case 0xc700: /* mova @(disp,PC),R0 */
1355 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1356 return;
1357 case 0xcb00: /* or #imm,R0 */
1358 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1359 return;
1360 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1362 TCGv addr, val;
1363 addr = tcg_temp_new();
1364 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1365 val = tcg_temp_new();
1366 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1367 tcg_gen_ori_i32(val, val, B7_0);
1368 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1369 tcg_temp_free(val);
1370 tcg_temp_free(addr);
1372 return;
1373 case 0xc300: /* trapa #imm */
1375 TCGv imm;
1376 CHECK_NOT_DELAY_SLOT
1377 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1378 imm = tcg_const_i32(B7_0);
1379 gen_helper_trapa(imm);
1380 tcg_temp_free(imm);
1381 ctx->bstate = BS_BRANCH;
1383 return;
1384 case 0xc800: /* tst #imm,R0 */
1386 TCGv val = tcg_temp_new();
1387 tcg_gen_andi_i32(val, REG(0), B7_0);
1388 gen_cmp_imm(TCG_COND_EQ, val, 0);
1389 tcg_temp_free(val);
1391 return;
1392 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1394 TCGv val = tcg_temp_new();
1395 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1396 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1397 tcg_gen_andi_i32(val, val, B7_0);
1398 gen_cmp_imm(TCG_COND_EQ, val, 0);
1399 tcg_temp_free(val);
1401 return;
1402 case 0xca00: /* xor #imm,R0 */
1403 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1404 return;
1405 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1407 TCGv addr, val;
1408 addr = tcg_temp_new();
1409 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1410 val = tcg_temp_new();
1411 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1412 tcg_gen_xori_i32(val, val, B7_0);
1413 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1414 tcg_temp_free(val);
1415 tcg_temp_free(addr);
1417 return;
1420 switch (ctx->opcode & 0xf08f) {
1421 case 0x408e: /* ldc Rm,Rn_BANK */
1422 CHECK_PRIVILEGED
1423 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1424 return;
1425 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1426 CHECK_PRIVILEGED
1427 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1428 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1429 return;
1430 case 0x0082: /* stc Rm_BANK,Rn */
1431 CHECK_PRIVILEGED
1432 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1433 return;
1434 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1435 CHECK_PRIVILEGED
1437 TCGv addr = tcg_temp_new();
1438 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1439 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1440 tcg_temp_free(addr);
1441 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4);
1443 return;
1446 switch (ctx->opcode & 0xf0ff) {
1447 case 0x0023: /* braf Rn */
1448 CHECK_NOT_DELAY_SLOT
1449 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1450 ctx->flags |= DELAY_SLOT;
1451 ctx->delayed_pc = (uint32_t) - 1;
1452 return;
1453 case 0x0003: /* bsrf Rn */
1454 CHECK_NOT_DELAY_SLOT
1455 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1456 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1457 ctx->flags |= DELAY_SLOT;
1458 ctx->delayed_pc = (uint32_t) - 1;
1459 return;
1460 case 0x4015: /* cmp/pl Rn */
1461 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1462 return;
1463 case 0x4011: /* cmp/pz Rn */
1464 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1465 return;
1466 case 0x4010: /* dt Rn */
1467 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1468 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1469 return;
1470 case 0x402b: /* jmp @Rn */
1471 CHECK_NOT_DELAY_SLOT
1472 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1473 ctx->flags |= DELAY_SLOT;
1474 ctx->delayed_pc = (uint32_t) - 1;
1475 return;
1476 case 0x400b: /* jsr @Rn */
1477 CHECK_NOT_DELAY_SLOT
1478 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1479 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1480 ctx->flags |= DELAY_SLOT;
1481 ctx->delayed_pc = (uint32_t) - 1;
1482 return;
1483 case 0x400e: /* ldc Rm,SR */
1484 CHECK_PRIVILEGED
1485 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1486 ctx->bstate = BS_STOP;
1487 return;
1488 case 0x4007: /* ldc.l @Rm+,SR */
1489 CHECK_PRIVILEGED
1491 TCGv val = tcg_temp_new();
1492 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1493 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1494 tcg_temp_free(val);
1495 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1496 ctx->bstate = BS_STOP;
1498 return;
1499 case 0x0002: /* stc SR,Rn */
1500 CHECK_PRIVILEGED
1501 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1502 return;
1503 case 0x4003: /* stc SR,@-Rn */
1504 CHECK_PRIVILEGED
1506 TCGv addr = tcg_temp_new();
1507 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1508 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1509 tcg_temp_free(addr);
1510 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4);
1512 return;
1513 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1514 case ldnum: \
1515 prechk \
1516 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1517 return; \
1518 case ldpnum: \
1519 prechk \
1520 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1521 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1522 return; \
1523 case stnum: \
1524 prechk \
1525 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1526 return; \
1527 case stpnum: \
1528 prechk \
1530 TCGv addr = tcg_temp_new(); \
1531 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1532 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1533 tcg_temp_free(addr); \
1534 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); \
1536 return;
1537 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1538 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1539 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1540 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1541 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1542 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1543 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1544 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1545 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1546 case 0x406a: /* lds Rm,FPSCR */
1547 CHECK_FPU_ENABLED
1548 gen_helper_ld_fpscr(REG(B11_8));
1549 ctx->bstate = BS_STOP;
1550 return;
1551 case 0x4066: /* lds.l @Rm+,FPSCR */
1552 CHECK_FPU_ENABLED
1554 TCGv addr = tcg_temp_new();
1555 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1556 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1557 gen_helper_ld_fpscr(addr);
1558 tcg_temp_free(addr);
1559 ctx->bstate = BS_STOP;
1561 return;
1562 case 0x006a: /* sts FPSCR,Rn */
1563 CHECK_FPU_ENABLED
1564 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1565 return;
1566 case 0x4062: /* sts FPSCR,@-Rn */
1567 CHECK_FPU_ENABLED
1569 TCGv addr, val;
1570 val = tcg_temp_new();
1571 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1572 addr = tcg_temp_new();
1573 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1574 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1575 tcg_temp_free(addr);
1576 tcg_temp_free(val);
1577 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4);
1579 return;
1580 case 0x00c3: /* movca.l R0,@Rm */
1582 TCGv val = tcg_temp_new();
1583 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1584 gen_helper_movcal (REG(B11_8), val);
1585 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1587 ctx->has_movcal = 1;
1588 return;
1589 case 0x40a9:
1590 /* MOVUA.L @Rm,R0 (Rm) -> R0
1591 Load non-boundary-aligned data */
1592 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1593 return;
1594 case 0x40e9:
1595 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1596 Load non-boundary-aligned data */
1597 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1598 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1599 return;
1600 case 0x0029: /* movt Rn */
1601 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1602 return;
1603 case 0x0073:
1604 /* MOVCO.L
1605 LDST -> T
1606 If (T == 1) R0 -> (Rn)
1607 0 -> LDST
1609 if (ctx->features & SH_FEATURE_SH4A) {
1610 int label = gen_new_label();
1611 gen_clr_t();
1612 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1613 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1614 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1615 gen_set_label(label);
1616 tcg_gen_movi_i32(cpu_ldst, 0);
1617 return;
1618 } else
1619 break;
1620 case 0x0063:
1621 /* MOVLI.L @Rm,R0
1622 1 -> LDST
1623 (Rm) -> R0
1624 When interrupt/exception
1625 occurred 0 -> LDST
1627 if (ctx->features & SH_FEATURE_SH4A) {
1628 tcg_gen_movi_i32(cpu_ldst, 0);
1629 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1630 tcg_gen_movi_i32(cpu_ldst, 1);
1631 return;
1632 } else
1633 break;
1634 case 0x0093: /* ocbi @Rn */
1636 gen_helper_ocbi (REG(B11_8));
1638 return;
1639 case 0x00a3: /* ocbp @Rn */
1641 TCGv dummy = tcg_temp_new();
1642 tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
1643 tcg_temp_free(dummy);
1645 return;
1646 case 0x00b3: /* ocbwb @Rn */
1648 TCGv dummy = tcg_temp_new();
1649 tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
1650 tcg_temp_free(dummy);
1652 return;
1653 case 0x0083: /* pref @Rn */
1654 return;
1655 case 0x00d3: /* prefi @Rn */
1656 if (ctx->features & SH_FEATURE_SH4A)
1657 return;
1658 else
1659 break;
1660 case 0x00e3: /* icbi @Rn */
1661 if (ctx->features & SH_FEATURE_SH4A)
1662 return;
1663 else
1664 break;
1665 case 0x00ab: /* synco */
1666 if (ctx->features & SH_FEATURE_SH4A)
1667 return;
1668 else
1669 break;
1670 case 0x4024: /* rotcl Rn */
1672 TCGv tmp = tcg_temp_new();
1673 tcg_gen_mov_i32(tmp, cpu_sr);
1674 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1675 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1676 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1677 tcg_temp_free(tmp);
1679 return;
1680 case 0x4025: /* rotcr Rn */
1682 TCGv tmp = tcg_temp_new();
1683 tcg_gen_mov_i32(tmp, cpu_sr);
1684 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1685 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1686 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1687 tcg_temp_free(tmp);
1689 return;
1690 case 0x4004: /* rotl Rn */
1691 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1692 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1693 gen_copy_bit_i32(REG(B11_8), 0, cpu_sr, 0);
1694 return;
1695 case 0x4005: /* rotr Rn */
1696 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1697 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1698 gen_copy_bit_i32(REG(B11_8), 31, cpu_sr, 0);
1699 return;
1700 case 0x4000: /* shll Rn */
1701 case 0x4020: /* shal Rn */
1702 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1703 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1704 return;
1705 case 0x4021: /* shar Rn */
1706 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1707 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1708 return;
1709 case 0x4001: /* shlr Rn */
1710 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1711 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1712 return;
1713 case 0x4008: /* shll2 Rn */
1714 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1715 return;
1716 case 0x4018: /* shll8 Rn */
1717 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1718 return;
1719 case 0x4028: /* shll16 Rn */
1720 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1721 return;
1722 case 0x4009: /* shlr2 Rn */
1723 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1724 return;
1725 case 0x4019: /* shlr8 Rn */
1726 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1727 return;
1728 case 0x4029: /* shlr16 Rn */
1729 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1730 return;
1731 case 0x401b: /* tas.b @Rn */
1733 TCGv addr, val;
1734 addr = tcg_temp_local_new();
1735 tcg_gen_mov_i32(addr, REG(B11_8));
1736 val = tcg_temp_local_new();
1737 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1738 gen_cmp_imm(TCG_COND_EQ, val, 0);
1739 tcg_gen_ori_i32(val, val, 0x80);
1740 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1741 tcg_temp_free(val);
1742 tcg_temp_free(addr);
1744 return;
1745 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1746 CHECK_FPU_ENABLED
1747 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1748 return;
1749 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1750 CHECK_FPU_ENABLED
1751 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1752 return;
1753 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1754 CHECK_FPU_ENABLED
1755 if (ctx->fpscr & FPSCR_PR) {
1756 TCGv_i64 fp;
1757 if (ctx->opcode & 0x0100)
1758 break; /* illegal instruction */
1759 fp = tcg_temp_new_i64();
1760 gen_helper_float_DT(fp, cpu_fpul);
1761 gen_store_fpr64(fp, DREG(B11_8));
1762 tcg_temp_free_i64(fp);
1764 else {
1765 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul);
1767 return;
1768 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1769 CHECK_FPU_ENABLED
1770 if (ctx->fpscr & FPSCR_PR) {
1771 TCGv_i64 fp;
1772 if (ctx->opcode & 0x0100)
1773 break; /* illegal instruction */
1774 fp = tcg_temp_new_i64();
1775 gen_load_fpr64(fp, DREG(B11_8));
1776 gen_helper_ftrc_DT(cpu_fpul, fp);
1777 tcg_temp_free_i64(fp);
1779 else {
1780 gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1782 return;
1783 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1784 CHECK_FPU_ENABLED
1786 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1788 return;
1789 case 0xf05d: /* fabs FRn/DRn */
1790 CHECK_FPU_ENABLED
1791 if (ctx->fpscr & FPSCR_PR) {
1792 if (ctx->opcode & 0x0100)
1793 break; /* illegal instruction */
1794 TCGv_i64 fp = tcg_temp_new_i64();
1795 gen_load_fpr64(fp, DREG(B11_8));
1796 gen_helper_fabs_DT(fp, fp);
1797 gen_store_fpr64(fp, DREG(B11_8));
1798 tcg_temp_free_i64(fp);
1799 } else {
1800 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1802 return;
1803 case 0xf06d: /* fsqrt FRn */
1804 CHECK_FPU_ENABLED
1805 if (ctx->fpscr & FPSCR_PR) {
1806 if (ctx->opcode & 0x0100)
1807 break; /* illegal instruction */
1808 TCGv_i64 fp = tcg_temp_new_i64();
1809 gen_load_fpr64(fp, DREG(B11_8));
1810 gen_helper_fsqrt_DT(fp, fp);
1811 gen_store_fpr64(fp, DREG(B11_8));
1812 tcg_temp_free_i64(fp);
1813 } else {
1814 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1816 return;
1817 case 0xf07d: /* fsrra FRn */
1818 CHECK_FPU_ENABLED
1819 break;
1820 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1821 CHECK_FPU_ENABLED
1822 if (!(ctx->fpscr & FPSCR_PR)) {
1823 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1825 return;
1826 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1827 CHECK_FPU_ENABLED
1828 if (!(ctx->fpscr & FPSCR_PR)) {
1829 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1831 return;
1832 case 0xf0ad: /* fcnvsd FPUL,DRn */
1833 CHECK_FPU_ENABLED
1835 TCGv_i64 fp = tcg_temp_new_i64();
1836 gen_helper_fcnvsd_FT_DT(fp, cpu_fpul);
1837 gen_store_fpr64(fp, DREG(B11_8));
1838 tcg_temp_free_i64(fp);
1840 return;
1841 case 0xf0bd: /* fcnvds DRn,FPUL */
1842 CHECK_FPU_ENABLED
1844 TCGv_i64 fp = tcg_temp_new_i64();
1845 gen_load_fpr64(fp, DREG(B11_8));
1846 gen_helper_fcnvds_DT_FT(cpu_fpul, fp);
1847 tcg_temp_free_i64(fp);
1849 return;
1851 #if 0
1852 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1853 ctx->opcode, ctx->pc);
1854 fflush(stderr);
1855 #endif
1856 gen_helper_raise_illegal_instruction();
1857 ctx->bstate = BS_EXCP;
1860 static void decode_opc(DisasContext * ctx)
1862 uint32_t old_flags = ctx->flags;
1864 _decode_opc(ctx);
1866 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1867 if (ctx->flags & DELAY_SLOT_CLEARME) {
1868 gen_store_flags(0);
1869 } else {
1870 /* go out of the delay slot */
1871 uint32_t new_flags = ctx->flags;
1872 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1873 gen_store_flags(new_flags);
1875 ctx->flags = 0;
1876 ctx->bstate = BS_BRANCH;
1877 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1878 gen_delayed_conditional_jump(ctx);
1879 } else if (old_flags & DELAY_SLOT) {
1880 gen_jump(ctx);
1885 /* go into a delay slot */
1886 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1887 gen_store_flags(ctx->flags);
1890 static inline void
1891 gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
1892 int search_pc)
1894 DisasContext ctx;
1895 target_ulong pc_start;
1896 static uint16_t *gen_opc_end;
1897 CPUBreakpoint *bp;
1898 int i, ii;
1899 int num_insns;
1900 int max_insns;
1902 pc_start = tb->pc;
1903 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1904 ctx.pc = pc_start;
1905 ctx.flags = (uint32_t)tb->flags;
1906 ctx.bstate = BS_NONE;
1907 ctx.sr = env->sr;
1908 ctx.fpscr = env->fpscr;
1909 ctx.memidx = (env->sr & SR_MD) ? 1 : 0;
1910 /* We don't know if the delayed pc came from a dynamic or static branch,
1911 so assume it is a dynamic branch. */
1912 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1913 ctx.tb = tb;
1914 ctx.singlestep_enabled = env->singlestep_enabled;
1915 ctx.features = env->features;
1916 ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);
1918 #ifdef DEBUG_DISAS
1919 qemu_log_mask(CPU_LOG_TB_CPU,
1920 "------------------------------------------------\n");
1921 log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
1922 #endif
1924 ii = -1;
1925 num_insns = 0;
1926 max_insns = tb->cflags & CF_COUNT_MASK;
1927 if (max_insns == 0)
1928 max_insns = CF_COUNT_MASK;
1929 gen_icount_start();
1930 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1931 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
1932 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1933 if (ctx.pc == bp->pc) {
1934 /* We have hit a breakpoint - make sure PC is up-to-date */
1935 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1936 gen_helper_debug();
1937 ctx.bstate = BS_EXCP;
1938 break;
1942 if (search_pc) {
1943 i = gen_opc_ptr - gen_opc_buf;
1944 if (ii < i) {
1945 ii++;
1946 while (ii < i)
1947 gen_opc_instr_start[ii++] = 0;
1949 gen_opc_pc[ii] = ctx.pc;
1950 gen_opc_hflags[ii] = ctx.flags;
1951 gen_opc_instr_start[ii] = 1;
1952 gen_opc_icount[ii] = num_insns;
1954 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1955 gen_io_start();
1956 #if 0
1957 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1958 fflush(stderr);
1959 #endif
1960 ctx.opcode = lduw_code(ctx.pc);
1961 decode_opc(&ctx);
1962 num_insns++;
1963 ctx.pc += 2;
1964 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1965 break;
1966 if (env->singlestep_enabled)
1967 break;
1968 if (num_insns >= max_insns)
1969 break;
1970 if (singlestep)
1971 break;
1973 if (tb->cflags & CF_LAST_IO)
1974 gen_io_end();
1975 if (env->singlestep_enabled) {
1976 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1977 gen_helper_debug();
1978 } else {
1979 switch (ctx.bstate) {
1980 case BS_STOP:
1981 /* gen_op_interrupt_restart(); */
1982 /* fall through */
1983 case BS_NONE:
1984 if (ctx.flags) {
1985 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1987 gen_goto_tb(&ctx, 0, ctx.pc);
1988 break;
1989 case BS_EXCP:
1990 /* gen_op_interrupt_restart(); */
1991 tcg_gen_exit_tb(0);
1992 break;
1993 case BS_BRANCH:
1994 default:
1995 break;
1999 gen_icount_end(tb, num_insns);
2000 *gen_opc_ptr = INDEX_op_end;
2001 if (search_pc) {
2002 i = gen_opc_ptr - gen_opc_buf;
2003 ii++;
2004 while (ii <= i)
2005 gen_opc_instr_start[ii++] = 0;
2006 } else {
2007 tb->size = ctx.pc - pc_start;
2008 tb->icount = num_insns;
2011 #ifdef DEBUG_DISAS
2012 #ifdef SH4_DEBUG_DISAS
2013 qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
2014 #endif
2015 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2016 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2017 log_target_disas(pc_start, ctx.pc - pc_start, 0);
2018 qemu_log("\n");
2020 #endif
2023 void gen_intermediate_code(CPUState * env, struct TranslationBlock *tb)
2025 gen_intermediate_code_internal(env, tb, 0);
2028 void gen_intermediate_code_pc(CPUState * env, struct TranslationBlock *tb)
2030 gen_intermediate_code_internal(env, tb, 1);
2033 void gen_pc_load(CPUState *env, TranslationBlock *tb,
2034 unsigned long searched_pc, int pc_pos, void *puc)
2036 env->pc = gen_opc_pc[pc_pos];
2037 env->flags = gen_opc_hflags[pc_pos];