target-sh4: optimize exceptions
[qemu.git] / target-sh4 / translate.c
blob69d507d864ad5b03e28c51dc37636c2e70b272c7
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
25 #define DEBUG_DISAS
26 #define SH4_DEBUG_DISAS
27 //#define SH4_SINGLE_STEP
29 #include "cpu.h"
30 #include "exec-all.h"
31 #include "disas.h"
32 #include "tcg-op.h"
33 #include "qemu-common.h"
35 #include "helper.h"
36 #define GEN_HELPER 1
37 #include "helper.h"
/* Per-TB decoder state: filled in before decoding starts and consulted by
   the opcode decoder (_decode_opc) and the branch generators below. */
39 typedef struct DisasContext {
40 struct TranslationBlock *tb; /* TB being translated (used for chaining in gen_goto_tb) */
41 target_ulong pc; /* guest PC of the instruction being decoded */
42 uint32_t sr; /* SR snapshot; drives register-bank selection (see REG/ALTREG) */
43 uint32_t fpscr; /* FPSCR snapshot; drives FP bank/size selection (see FREG/XREG) */
44 uint16_t opcode; /* current 16-bit instruction word */
45 uint32_t flags; /* translation flags, e.g. DELAY_SLOT* bits */
46 int bstate; /* how the TB ends: one of the BS_* values below */
47 int memidx; /* MMU index passed to tcg_gen_qemu_ld/st */
48 uint32_t delayed_pc; /* static branch target, or (uint32_t)-1 when dynamic */
49 int singlestep_enabled; /* when set, gen_goto_tb raises a debug exception */
50 uint32_t features; /* CPU feature bits (SH_FEATURE_*) */
51 int has_movcal; /* non-zero while a movca.l backup may still be live */
52 } DisasContext;
/* Privilege test: user-mode emulation is always unprivileged, otherwise
   privilege follows the SR.MD bit of the code being translated. */
54 #if defined(CONFIG_USER_ONLY)
55 #define IS_USER(ctx) 1
56 #else
57 #define IS_USER(ctx) (!(ctx->sr & SR_MD))
58 #endif
/* Values for DisasContext.bstate, describing why/how the TB ends. */
60 enum {
61 BS_NONE = 0, /* We go out of the TB without reaching a branch or an
62 * exception condition */
64 BS_STOP = 1, /* We want to stop translation for any reason */
65 BS_BRANCH = 2, /* We reached a branch condition */
66 BS_EXCP = 3, /* We reached an exception condition */
69 /* global register indexes */
70 static TCGv_ptr cpu_env;
71 static TCGv cpu_gregs[24]; /* R0-R15 plus the shadow bank of R0-R7 */
72 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
73 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
74 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
75 static TCGv cpu_fregs[32]; /* FPR0-FPR15 in both FPSCR.FR banks */
77 /* internal register indexes */
78 static TCGv cpu_flags, cpu_delayed_pc;
/* NOTE(review): presumably a per-op record of ctx->flags for PC/state
   restore on exception — confirm against gen_intermediate_code. */
80 static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
82 #include "gen-icount.h"
/* Create the TCG globals that mirror the CPUState register fields.
   Must run once before any translation; idempotent via done_init. */
84 static void sh4_translate_init(void)
86 int i;
87 static int done_init = 0;
88 static const char * const gregnames[24] = {
89 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
90 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
91 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
92 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
93 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
95 static const char * const fregnames[32] = {
96 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
97 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
98 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
99 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
100 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
101 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
102 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
103 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
/* only initialize once, even if several CPUs are created */
106 if (done_init)
107 return;
109 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
/* 16 architectural registers plus the 8 banked shadows */
111 for (i = 0; i < 24; i++)
112 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
113 offsetof(CPUState, gregs[i]),
114 gregnames[i]);
116 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, pc), "PC");
118 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, sr), "SR");
120 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUState, ssr), "SSR");
122 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUState, spc), "SPC");
124 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, gbr), "GBR");
126 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, vbr), "VBR");
128 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
129 offsetof(CPUState, sgr), "SGR");
130 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
131 offsetof(CPUState, dbr), "DBR");
132 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
133 offsetof(CPUState, mach), "MACH");
134 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
135 offsetof(CPUState, macl), "MACL");
136 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
137 offsetof(CPUState, pr), "PR");
138 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
139 offsetof(CPUState, fpscr), "FPSCR");
140 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
141 offsetof(CPUState, fpul), "FPUL");
/* internal (non-architectural) state used by the translator */
143 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
144 offsetof(CPUState, flags), "_flags_");
145 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
146 offsetof(CPUState, delayed_pc),
147 "_delayed_pc_");
148 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
149 offsetof(CPUState, ldst), "_ldst_");
151 for (i = 0; i < 32; i++)
152 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
153 offsetof(CPUState, fregs[i]),
154 fregnames[i]);
156 /* register helpers */
157 #define GEN_HELPER 2
158 #include "helper.h"
160 done_init = 1;
/* Dump the architectural CPU state to 'f' using 'cpu_fprintf'; used by
   the monitor and -d logging.  'flags' is currently unused here. */
163 void cpu_dump_state(CPUState * env, FILE * f,
164 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
165 int flags)
167 int i;
168 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
169 env->pc, env->sr, env->pr, env->fpscr);
170 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
171 env->spc, env->ssr, env->gbr, env->vbr);
172 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
173 env->sgr, env->dbr, env->delayed_pc, env->fpul);
/* all 24 general registers (both banks), four per line */
174 for (i = 0; i < 24; i += 4) {
175 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
176 i, env->gregs[i], i + 1, env->gregs[i + 1],
177 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
/* report pending delay-slot state, if any */
179 if (env->flags & DELAY_SLOT) {
180 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
181 env->delayed_pc);
182 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
183 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
184 env->delayed_pc);
/* Reset the CPU to its architectural power-on state.  User-mode
   emulation gets Linux-userspace defaults instead of the hardware
   reset values. */
188 static void cpu_sh4_reset(CPUSH4State * env)
190 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
191 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
192 log_cpu_state(env, 0);
195 #if defined(CONFIG_USER_ONLY)
196 env->sr = 0;
197 #else
/* hardware reset: privileged, bank 1, interrupts blocked and masked */
198 env->sr = SR_MD | SR_RB | SR_BL | SR_I3 | SR_I2 | SR_I1 | SR_I0;
199 #endif
200 env->vbr = 0;
/* reset vector: start of the P2 (uncached) area */
201 env->pc = 0xA0000000;
202 #if defined(CONFIG_USER_ONLY)
203 env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
204 set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
205 #else
206 env->fpscr = FPSCR_DN | FPSCR_RM_ZERO; /* CPU reset value according to SH4 manual */
207 set_float_rounding_mode(float_round_to_zero, &env->fp_status);
208 set_flush_to_zero(1, &env->fp_status);
209 #endif
210 set_default_nan_mode(1, &env->fp_status);
211 env->mmucr = 0;
/* Description of one supported SH4 CPU model; see sh4_defs below. */
214 typedef struct {
215 const char *name; /* model name matched by cpu_sh4_find_by_name() */
216 int id; /* SH_CPU_* identifier */
217 uint32_t pvr; /* processor version register value */
218 uint32_t prr; /* product register value */
219 uint32_t cvr; /* cache version register value */
220 uint32_t features; /* SH_FEATURE_* bits copied to env->features */
221 } sh4_def_t;
/* Table of supported CPU models.  Entry 0 (SH7750R) doubles as the
   default returned for the model name "any". */
223 static sh4_def_t sh4_defs[] = {
225 .name = "SH7750R",
226 .id = SH_CPU_SH7750R,
227 .pvr = 0x00050000,
228 .prr = 0x00000100,
229 .cvr = 0x00110000,
230 .features = SH_FEATURE_BCR3_AND_BCR4,
231 }, {
232 .name = "SH7751R",
233 .id = SH_CPU_SH7751R,
234 .pvr = 0x04050005,
235 .prr = 0x00000113,
236 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
237 .features = SH_FEATURE_BCR3_AND_BCR4,
238 }, {
239 .name = "SH7785",
240 .id = SH_CPU_SH7785,
241 .pvr = 0x10300700,
242 .prr = 0x00000200,
243 .cvr = 0x71440211,
244 .features = SH_FEATURE_SH4A,
248 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
250 int i;
252 if (strcasecmp(name, "any") == 0)
253 return &sh4_defs[0];
255 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
256 if (strcasecmp(name, sh4_defs[i].name) == 0)
257 return &sh4_defs[i];
259 return NULL;
262 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
264 int i;
266 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
267 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
270 static void cpu_sh4_register(CPUSH4State *env, const sh4_def_t *def)
272 env->pvr = def->pvr;
273 env->prr = def->prr;
274 env->cvr = def->cvr;
275 env->id = def->id;
/* Allocate and initialize a CPU of the given model.  Returns NULL if
   'cpu_model' names no known model.  The initialization order below
   (alloc, exec init, translator init, reset, register, TLB flush,
   vcpu init) is deliberate; do not reorder casually. */
278 CPUSH4State *cpu_sh4_init(const char *cpu_model)
280 CPUSH4State *env;
281 const sh4_def_t *def;
283 def = cpu_sh4_find_by_name(cpu_model);
284 if (!def)
285 return NULL;
/* qemu_mallocz zero-fills the state */
286 env = qemu_mallocz(sizeof(CPUSH4State));
287 env->features = def->features;
288 cpu_exec_init(env);
/* empty movca.l backup list: tail points at the list head */
289 env->movcal_backup_tail = &(env->movcal_backup);
290 sh4_translate_init();
291 env->cpu_model_str = cpu_model;
292 cpu_sh4_reset(env);
293 cpu_sh4_register(env, def);
294 tlb_flush(env, 1);
295 qemu_init_vcpu(env);
296 return env;
/* Emit a jump to guest address 'dest'.  When the target is in the same
   guest page as this TB and single-stepping is off, emit a direct
   (chainable) TB jump through slot 'n' (0 or 1); otherwise set PC and
   exit to the main loop, raising a debug exception first when
   single-stepping. */
299 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
301 TranslationBlock *tb;
302 tb = ctx->tb;
304 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
305 !ctx->singlestep_enabled) {
306 /* Use a direct jump if in same page and singlestep not enabled */
307 tcg_gen_goto_tb(n);
308 tcg_gen_movi_i32(cpu_pc, dest);
/* (tb address | slot) identifies the chaining slot to patch */
309 tcg_gen_exit_tb((long) tb + n);
310 } else {
311 tcg_gen_movi_i32(cpu_pc, dest);
312 if (ctx->singlestep_enabled)
313 gen_helper_debug();
314 tcg_gen_exit_tb(0);
/* Emit the final jump of a delayed branch.  A statically-known target
   (ctx->delayed_pc != -1) can use gen_goto_tb for chaining; a dynamic
   target must load PC from cpu_delayed_pc and exit the TB. */
318 static void gen_jump(DisasContext * ctx)
320 if (ctx->delayed_pc == (uint32_t) - 1) {
321 /* Target is not statically known, it comes necessarily from a
322 delayed jump as immediate jump are conditinal jumps */
323 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
324 if (ctx->singlestep_enabled)
325 gen_helper_debug();
326 tcg_gen_exit_tb(0);
327 } else {
328 gen_goto_tb(ctx, 0, ctx->delayed_pc);
332 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
334 TCGv sr;
335 int label = gen_new_label();
336 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
337 sr = tcg_temp_new();
338 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
339 tcg_gen_brcondi_i32(TCG_COND_NE, sr, t ? SR_T : 0, label);
340 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
341 gen_set_label(label);
344 /* Immediate conditional jump (bt or bf) */
345 static void gen_conditional_jump(DisasContext * ctx,
346 target_ulong ift, target_ulong ifnott)
348 int l1;
349 TCGv sr;
351 l1 = gen_new_label();
352 sr = tcg_temp_new();
353 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
354 tcg_gen_brcondi_i32(TCG_COND_EQ, sr, SR_T, l1);
355 gen_goto_tb(ctx, 0, ifnott);
356 gen_set_label(l1);
357 gen_goto_tb(ctx, 1, ift);
360 /* Delayed conditional jump (bt or bf) */
361 static void gen_delayed_conditional_jump(DisasContext * ctx)
363 int l1;
364 TCGv ds;
366 l1 = gen_new_label();
367 ds = tcg_temp_new();
368 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
369 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, DELAY_SLOT_TRUE, l1);
370 gen_goto_tb(ctx, 1, ctx->pc + 2);
371 gen_set_label(l1);
372 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
373 gen_jump(ctx);
/* Set the SR.T flag. */
376 static inline void gen_set_t(void)
378 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
/* Clear the SR.T flag. */
381 static inline void gen_clr_t(void)
383 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
/* Set SR.T to the result of comparing t1 <cond> t0 (note the operand
   order passed to brcond: t1 is the left operand). */
386 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
388 int label1 = gen_new_label();
389 int label2 = gen_new_label();
/* branch to label1 when the comparison holds, else fall through */
390 tcg_gen_brcond_i32(cond, t1, t0, label1);
391 gen_clr_t();
392 tcg_gen_br(label2);
393 gen_set_label(label1);
394 gen_set_t();
395 gen_set_label(label2);
/* Set SR.T to the result of comparing t0 <cond> imm. */
398 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
400 int label1 = gen_new_label();
401 int label2 = gen_new_label();
/* branch to label1 when the comparison holds, else fall through */
402 tcg_gen_brcondi_i32(cond, t0, imm, label1);
403 gen_clr_t();
404 tcg_gen_br(label2);
405 gen_set_label(label1);
406 gen_set_t();
407 gen_set_label(label2);
/* Replace cpu_flags with 'flags', preserving only the DELAY_SLOT_TRUE
   bit that may have been set by a preceding delay-slot sequence. */
410 static inline void gen_store_flags(uint32_t flags)
412 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
413 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
416 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
418 TCGv tmp = tcg_temp_new();
420 p0 &= 0x1f;
421 p1 &= 0x1f;
423 tcg_gen_andi_i32(tmp, t1, (1 << p1));
424 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
425 if (p0 < p1)
426 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
427 else if (p0 > p1)
428 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
429 tcg_gen_or_i32(t0, t0, tmp);
431 tcg_temp_free(tmp);
/* Load the FP register pair (reg, reg+1) into a 64-bit temp:
   fregs[reg] supplies the high 32 bits, fregs[reg + 1] the low 32. */
434 static inline void gen_load_fpr64(TCGv_i64 t, int reg)
436 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
/* Store a 64-bit value into the FP register pair (reg, reg+1):
   high 32 bits go to fregs[reg], low 32 bits to fregs[reg + 1].
   NOTE: clobbers 't' (it is shifted right by 32 in place). */
439 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
441 TCGv_i32 tmp = tcg_temp_new_i32();
442 tcg_gen_trunc_i64_i32(tmp, t);
443 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
444 tcg_gen_shri_i64(t, t, 32);
445 tcg_gen_trunc_i64_i32(tmp, t);
446 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
447 tcg_temp_free_i32(tmp);
/* Operand field extraction from the 16-bit opcode (Bhi_lo = bits hi..lo;
   a trailing 's' means sign-extended). */
450 #define B3_0 (ctx->opcode & 0xf)
451 #define B6_4 ((ctx->opcode >> 4) & 0x7)
452 #define B7_4 ((ctx->opcode >> 4) & 0xf)
453 #define B7_0 (ctx->opcode & 0xff)
454 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
455 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
456 (ctx->opcode & 0xfff))
457 #define B11_8 ((ctx->opcode >> 8) & 0xf)
458 #define B15_12 ((ctx->opcode >> 12) & 0xf)
/* General register access.  R0-R7 are banked: REG selects the bank
   currently active per SR.MD/SR.RB, ALTREG the inactive one. */
460 #define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
461 (cpu_gregs[x + 16]) : (cpu_gregs[x]))
463 #define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
464 ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
/* FP register access: FREG honours the FPSCR.FR bank; XHACK remaps an
   XD pair index; XREG combines both; DREG is valid for even x only. */
466 #define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
467 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
468 #define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
469 #define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise a slot-illegal exception and stop decoding when the current
   instruction is not allowed inside a delay slot. */
471 #define CHECK_NOT_DELAY_SLOT \
472 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
474 gen_helper_raise_slot_illegal_instruction(); \
475 ctx->bstate = BS_EXCP; \
476 return; \
/* Raise an illegal-instruction exception (slot variant inside a delay
   slot) and stop decoding when executed without privilege. */
479 #define CHECK_PRIVILEGED \
480 if (IS_USER(ctx)) { \
481 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
482 gen_helper_raise_slot_illegal_instruction(); \
483 } else { \
484 gen_helper_raise_illegal_instruction(); \
486 ctx->bstate = BS_EXCP; \
487 return; \
/* Raise an FPU-disabled exception (slot variant inside a delay slot)
   and stop decoding when SR.FD is set in the translation flags. */
490 #define CHECK_FPU_ENABLED \
491 if (ctx->flags & SR_FD) { \
492 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
493 gen_helper_raise_slot_fpu_disable(); \
494 } else { \
495 gen_helper_raise_fpu_disable(); \
497 ctx->bstate = BS_EXCP; \
498 return; \
501 static void _decode_opc(DisasContext * ctx)
503 /* This code tries to make movcal emulation sufficiently
504 accurate for Linux purposes. This instruction writes
505 memory, and prior to that, always allocates a cache line.
506 It is used in two contexts:
507 - in memcpy, where data is copied in blocks, the first write
508 of to a block uses movca.l for performance.
509 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
510 to flush the cache. Here, the data written by movcal.l is never
511 written to memory, and the data written is just bogus.
513 To simulate this, we simulate movcal.l, we store the value to memory,
514 but we also remember the previous content. If we see ocbi, we check
515 if movcal.l for that address was done previously. If so, the write should
516 not have hit the memory, so we restore the previous content.
517 When we see an instruction that is neither movca.l
518 nor ocbi, the previous content is discarded.
520 To optimize, we only try to flush stores when we're at the start of
521 TB, or if we already saw movca.l in this TB and did not flush stores
522 yet. */
523 if (ctx->has_movcal)
525 int opcode = ctx->opcode & 0xf0ff;
526 if (opcode != 0x0093 /* ocbi */
527 && opcode != 0x00c3 /* movca.l */)
529 gen_helper_discard_movcal_backup ();
530 ctx->has_movcal = 0;
534 #if 0
535 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
536 #endif
538 switch (ctx->opcode) {
539 case 0x0019: /* div0u */
540 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
541 return;
542 case 0x000b: /* rts */
543 CHECK_NOT_DELAY_SLOT
544 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
545 ctx->flags |= DELAY_SLOT;
546 ctx->delayed_pc = (uint32_t) - 1;
547 return;
548 case 0x0028: /* clrmac */
549 tcg_gen_movi_i32(cpu_mach, 0);
550 tcg_gen_movi_i32(cpu_macl, 0);
551 return;
552 case 0x0048: /* clrs */
553 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
554 return;
555 case 0x0008: /* clrt */
556 gen_clr_t();
557 return;
558 case 0x0038: /* ldtlb */
559 CHECK_PRIVILEGED
560 gen_helper_ldtlb();
561 return;
562 case 0x002b: /* rte */
563 CHECK_PRIVILEGED
564 CHECK_NOT_DELAY_SLOT
565 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
566 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
567 ctx->flags |= DELAY_SLOT;
568 ctx->delayed_pc = (uint32_t) - 1;
569 return;
570 case 0x0058: /* sets */
571 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
572 return;
573 case 0x0018: /* sett */
574 gen_set_t();
575 return;
576 case 0xfbfd: /* frchg */
577 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
578 ctx->bstate = BS_STOP;
579 return;
580 case 0xf3fd: /* fschg */
581 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
582 ctx->bstate = BS_STOP;
583 return;
584 case 0x0009: /* nop */
585 return;
586 case 0x001b: /* sleep */
587 CHECK_PRIVILEGED
588 gen_helper_sleep(tcg_const_i32(ctx->pc + 2));
589 return;
592 switch (ctx->opcode & 0xf000) {
593 case 0x1000: /* mov.l Rm,@(disp,Rn) */
595 TCGv addr = tcg_temp_new();
596 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
597 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
598 tcg_temp_free(addr);
600 return;
601 case 0x5000: /* mov.l @(disp,Rm),Rn */
603 TCGv addr = tcg_temp_new();
604 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
605 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
606 tcg_temp_free(addr);
608 return;
609 case 0xe000: /* mov #imm,Rn */
610 tcg_gen_movi_i32(REG(B11_8), B7_0s);
611 return;
612 case 0x9000: /* mov.w @(disp,PC),Rn */
614 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
615 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
616 tcg_temp_free(addr);
618 return;
619 case 0xd000: /* mov.l @(disp,PC),Rn */
621 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
622 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
623 tcg_temp_free(addr);
625 return;
626 case 0x7000: /* add #imm,Rn */
627 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
628 return;
629 case 0xa000: /* bra disp */
630 CHECK_NOT_DELAY_SLOT
631 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
632 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
633 ctx->flags |= DELAY_SLOT;
634 return;
635 case 0xb000: /* bsr disp */
636 CHECK_NOT_DELAY_SLOT
637 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
638 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
639 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
640 ctx->flags |= DELAY_SLOT;
641 return;
644 switch (ctx->opcode & 0xf00f) {
645 case 0x6003: /* mov Rm,Rn */
646 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
647 return;
648 case 0x2000: /* mov.b Rm,@Rn */
649 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
650 return;
651 case 0x2001: /* mov.w Rm,@Rn */
652 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
653 return;
654 case 0x2002: /* mov.l Rm,@Rn */
655 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
656 return;
657 case 0x6000: /* mov.b @Rm,Rn */
658 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
659 return;
660 case 0x6001: /* mov.w @Rm,Rn */
661 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
662 return;
663 case 0x6002: /* mov.l @Rm,Rn */
664 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
665 return;
666 case 0x2004: /* mov.b Rm,@-Rn */
668 TCGv addr = tcg_temp_new();
669 tcg_gen_subi_i32(addr, REG(B11_8), 1);
670 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
671 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
672 tcg_temp_free(addr);
674 return;
675 case 0x2005: /* mov.w Rm,@-Rn */
677 TCGv addr = tcg_temp_new();
678 tcg_gen_subi_i32(addr, REG(B11_8), 2);
679 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
680 tcg_gen_mov_i32(REG(B11_8), addr);
681 tcg_temp_free(addr);
683 return;
684 case 0x2006: /* mov.l Rm,@-Rn */
686 TCGv addr = tcg_temp_new();
687 tcg_gen_subi_i32(addr, REG(B11_8), 4);
688 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
689 tcg_gen_mov_i32(REG(B11_8), addr);
691 return;
692 case 0x6004: /* mov.b @Rm+,Rn */
693 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
694 if ( B11_8 != B7_4 )
695 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
696 return;
697 case 0x6005: /* mov.w @Rm+,Rn */
698 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
699 if ( B11_8 != B7_4 )
700 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
701 return;
702 case 0x6006: /* mov.l @Rm+,Rn */
703 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
704 if ( B11_8 != B7_4 )
705 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
706 return;
707 case 0x0004: /* mov.b Rm,@(R0,Rn) */
709 TCGv addr = tcg_temp_new();
710 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
711 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
712 tcg_temp_free(addr);
714 return;
715 case 0x0005: /* mov.w Rm,@(R0,Rn) */
717 TCGv addr = tcg_temp_new();
718 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
719 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
720 tcg_temp_free(addr);
722 return;
723 case 0x0006: /* mov.l Rm,@(R0,Rn) */
725 TCGv addr = tcg_temp_new();
726 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
727 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
728 tcg_temp_free(addr);
730 return;
731 case 0x000c: /* mov.b @(R0,Rm),Rn */
733 TCGv addr = tcg_temp_new();
734 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
735 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
736 tcg_temp_free(addr);
738 return;
739 case 0x000d: /* mov.w @(R0,Rm),Rn */
741 TCGv addr = tcg_temp_new();
742 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
743 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
744 tcg_temp_free(addr);
746 return;
747 case 0x000e: /* mov.l @(R0,Rm),Rn */
749 TCGv addr = tcg_temp_new();
750 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
751 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
752 tcg_temp_free(addr);
754 return;
755 case 0x6008: /* swap.b Rm,Rn */
757 TCGv high, low;
758 high = tcg_temp_new();
759 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
760 low = tcg_temp_new();
761 tcg_gen_ext16u_i32(low, REG(B7_4));
762 tcg_gen_bswap16_i32(low, low);
763 tcg_gen_or_i32(REG(B11_8), high, low);
764 tcg_temp_free(low);
765 tcg_temp_free(high);
767 return;
768 case 0x6009: /* swap.w Rm,Rn */
770 TCGv high, low;
771 high = tcg_temp_new();
772 tcg_gen_shli_i32(high, REG(B7_4), 16);
773 low = tcg_temp_new();
774 tcg_gen_shri_i32(low, REG(B7_4), 16);
775 tcg_gen_ext16u_i32(low, low);
776 tcg_gen_or_i32(REG(B11_8), high, low);
777 tcg_temp_free(low);
778 tcg_temp_free(high);
780 return;
781 case 0x200d: /* xtrct Rm,Rn */
783 TCGv high, low;
784 high = tcg_temp_new();
785 tcg_gen_shli_i32(high, REG(B7_4), 16);
786 low = tcg_temp_new();
787 tcg_gen_shri_i32(low, REG(B11_8), 16);
788 tcg_gen_ext16u_i32(low, low);
789 tcg_gen_or_i32(REG(B11_8), high, low);
790 tcg_temp_free(low);
791 tcg_temp_free(high);
793 return;
794 case 0x300c: /* add Rm,Rn */
795 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
796 return;
797 case 0x300e: /* addc Rm,Rn */
798 gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8));
799 return;
800 case 0x300f: /* addv Rm,Rn */
801 gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8));
802 return;
803 case 0x2009: /* and Rm,Rn */
804 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
805 return;
806 case 0x3000: /* cmp/eq Rm,Rn */
807 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
808 return;
809 case 0x3003: /* cmp/ge Rm,Rn */
810 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
811 return;
812 case 0x3007: /* cmp/gt Rm,Rn */
813 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
814 return;
815 case 0x3006: /* cmp/hi Rm,Rn */
816 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
817 return;
818 case 0x3002: /* cmp/hs Rm,Rn */
819 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
820 return;
821 case 0x200c: /* cmp/str Rm,Rn */
823 int label1 = gen_new_label();
824 int label2 = gen_new_label();
825 TCGv cmp1 = tcg_temp_local_new();
826 TCGv cmp2 = tcg_temp_local_new();
827 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
828 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
829 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
830 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
831 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
832 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
833 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
834 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
835 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
836 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
837 tcg_gen_br(label2);
838 gen_set_label(label1);
839 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
840 gen_set_label(label2);
841 tcg_temp_free(cmp2);
842 tcg_temp_free(cmp1);
844 return;
845 case 0x2007: /* div0s Rm,Rn */
847 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
848 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
849 TCGv val = tcg_temp_new();
850 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
851 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
852 tcg_temp_free(val);
854 return;
855 case 0x3004: /* div1 Rm,Rn */
856 gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8));
857 return;
858 case 0x300d: /* dmuls.l Rm,Rn */
860 TCGv_i64 tmp1 = tcg_temp_new_i64();
861 TCGv_i64 tmp2 = tcg_temp_new_i64();
863 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
864 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
865 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
866 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
867 tcg_gen_shri_i64(tmp1, tmp1, 32);
868 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
870 tcg_temp_free_i64(tmp2);
871 tcg_temp_free_i64(tmp1);
873 return;
874 case 0x3005: /* dmulu.l Rm,Rn */
876 TCGv_i64 tmp1 = tcg_temp_new_i64();
877 TCGv_i64 tmp2 = tcg_temp_new_i64();
879 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
880 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
881 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
882 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
883 tcg_gen_shri_i64(tmp1, tmp1, 32);
884 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
886 tcg_temp_free_i64(tmp2);
887 tcg_temp_free_i64(tmp1);
889 return;
890 case 0x600e: /* exts.b Rm,Rn */
891 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
892 return;
893 case 0x600f: /* exts.w Rm,Rn */
894 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
895 return;
896 case 0x600c: /* extu.b Rm,Rn */
897 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
898 return;
899 case 0x600d: /* extu.w Rm,Rn */
900 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
901 return;
902 case 0x000f: /* mac.l @Rm+,@Rn+ */
904 TCGv arg0, arg1;
905 arg0 = tcg_temp_new();
906 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
907 arg1 = tcg_temp_new();
908 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
909 gen_helper_macl(arg0, arg1);
910 tcg_temp_free(arg1);
911 tcg_temp_free(arg0);
912 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
913 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
915 return;
916 case 0x400f: /* mac.w @Rm+,@Rn+ */
918 TCGv arg0, arg1;
919 arg0 = tcg_temp_new();
920 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
921 arg1 = tcg_temp_new();
922 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
923 gen_helper_macw(arg0, arg1);
924 tcg_temp_free(arg1);
925 tcg_temp_free(arg0);
926 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
927 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
929 return;
930 case 0x0007: /* mul.l Rm,Rn */
931 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
932 return;
933 case 0x200f: /* muls.w Rm,Rn */
935 TCGv arg0, arg1;
936 arg0 = tcg_temp_new();
937 tcg_gen_ext16s_i32(arg0, REG(B7_4));
938 arg1 = tcg_temp_new();
939 tcg_gen_ext16s_i32(arg1, REG(B11_8));
940 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
941 tcg_temp_free(arg1);
942 tcg_temp_free(arg0);
944 return;
945 case 0x200e: /* mulu.w Rm,Rn */
947 TCGv arg0, arg1;
948 arg0 = tcg_temp_new();
949 tcg_gen_ext16u_i32(arg0, REG(B7_4));
950 arg1 = tcg_temp_new();
951 tcg_gen_ext16u_i32(arg1, REG(B11_8));
952 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
953 tcg_temp_free(arg1);
954 tcg_temp_free(arg0);
956 return;
957 case 0x600b: /* neg Rm,Rn */
958 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
959 return;
960 case 0x600a: /* negc Rm,Rn */
961 gen_helper_negc(REG(B11_8), REG(B7_4));
962 return;
963 case 0x6007: /* not Rm,Rn */
964 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
965 return;
966 case 0x200b: /* or Rm,Rn */
967 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
968 return;
969 case 0x400c: /* shad Rm,Rn */
971 int label1 = gen_new_label();
972 int label2 = gen_new_label();
973 int label3 = gen_new_label();
974 int label4 = gen_new_label();
975 TCGv shift;
976 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
977 /* Rm positive, shift to the left */
978 shift = tcg_temp_new();
979 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
980 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
981 tcg_temp_free(shift);
982 tcg_gen_br(label4);
983 /* Rm negative, shift to the right */
984 gen_set_label(label1);
985 shift = tcg_temp_new();
986 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
987 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
988 tcg_gen_not_i32(shift, REG(B7_4));
989 tcg_gen_andi_i32(shift, shift, 0x1f);
990 tcg_gen_addi_i32(shift, shift, 1);
991 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
992 tcg_temp_free(shift);
993 tcg_gen_br(label4);
994 /* Rm = -32 */
995 gen_set_label(label2);
996 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
997 tcg_gen_movi_i32(REG(B11_8), 0);
998 tcg_gen_br(label4);
999 gen_set_label(label3);
1000 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
1001 gen_set_label(label4);
1003 return;
1004 case 0x400d: /* shld Rm,Rn */
1006 int label1 = gen_new_label();
1007 int label2 = gen_new_label();
1008 int label3 = gen_new_label();
1009 TCGv shift;
1010 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
1011 /* Rm positive, shift to the left */
1012 shift = tcg_temp_new();
1013 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1014 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
1015 tcg_temp_free(shift);
1016 tcg_gen_br(label3);
1017 /* Rm negative, shift to the right */
1018 gen_set_label(label1);
1019 shift = tcg_temp_new();
1020 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1021 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1022 tcg_gen_not_i32(shift, REG(B7_4));
1023 tcg_gen_andi_i32(shift, shift, 0x1f);
1024 tcg_gen_addi_i32(shift, shift, 1);
1025 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1026 tcg_temp_free(shift);
1027 tcg_gen_br(label3);
1028 /* Rm = -32 */
1029 gen_set_label(label2);
1030 tcg_gen_movi_i32(REG(B11_8), 0);
1031 gen_set_label(label3);
1033 return;
1034 case 0x3008: /* sub Rm,Rn */
1035 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1036 return;
1037 case 0x300a: /* subc Rm,Rn */
1038 gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8));
1039 return;
1040 case 0x300b: /* subv Rm,Rn */
1041 gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8));
1042 return;
1043 case 0x2008: /* tst Rm,Rn */
1045 TCGv val = tcg_temp_new();
1046 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1047 gen_cmp_imm(TCG_COND_EQ, val, 0);
1048 tcg_temp_free(val);
1050 return;
1051 case 0x200a: /* xor Rm,Rn */
1052 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1053 return;
1054 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1055 CHECK_FPU_ENABLED
1056 if (ctx->fpscr & FPSCR_SZ) {
1057 TCGv_i64 fp = tcg_temp_new_i64();
1058 gen_load_fpr64(fp, XREG(B7_4));
1059 gen_store_fpr64(fp, XREG(B11_8));
1060 tcg_temp_free_i64(fp);
1061 } else {
1062 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1064 return;
1065 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1066 CHECK_FPU_ENABLED
1067 if (ctx->fpscr & FPSCR_SZ) {
1068 TCGv addr_hi = tcg_temp_new();
1069 int fr = XREG(B7_4);
1070 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1071 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1072 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1073 tcg_temp_free(addr_hi);
1074 } else {
1075 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1077 return;
1078 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1079 CHECK_FPU_ENABLED
1080 if (ctx->fpscr & FPSCR_SZ) {
1081 TCGv addr_hi = tcg_temp_new();
1082 int fr = XREG(B11_8);
1083 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1084 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1085 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1086 tcg_temp_free(addr_hi);
1087 } else {
1088 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1090 return;
1091 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1092 CHECK_FPU_ENABLED
1093 if (ctx->fpscr & FPSCR_SZ) {
1094 TCGv addr_hi = tcg_temp_new();
1095 int fr = XREG(B11_8);
1096 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1097 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1098 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1099 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1100 tcg_temp_free(addr_hi);
1101 } else {
1102 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1103 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1105 return;
1106 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1107 CHECK_FPU_ENABLED
1108 if (ctx->fpscr & FPSCR_SZ) {
1109 TCGv addr = tcg_temp_new_i32();
1110 int fr = XREG(B7_4);
1111 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1112 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1113 tcg_gen_subi_i32(addr, addr, 4);
1114 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1115 tcg_gen_mov_i32(REG(B11_8), addr);
1116 tcg_temp_free(addr);
1117 } else {
1118 TCGv addr;
1119 addr = tcg_temp_new_i32();
1120 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1121 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1122 tcg_gen_mov_i32(REG(B11_8), addr);
1123 tcg_temp_free(addr);
1125 return;
1126 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1127 CHECK_FPU_ENABLED
1129 TCGv addr = tcg_temp_new_i32();
1130 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1131 if (ctx->fpscr & FPSCR_SZ) {
1132 int fr = XREG(B11_8);
1133 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1134 tcg_gen_addi_i32(addr, addr, 4);
1135 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1136 } else {
1137 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1139 tcg_temp_free(addr);
1141 return;
1142 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1143 CHECK_FPU_ENABLED
1145 TCGv addr = tcg_temp_new();
1146 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1147 if (ctx->fpscr & FPSCR_SZ) {
1148 int fr = XREG(B7_4);
1149 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1150 tcg_gen_addi_i32(addr, addr, 4);
1151 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1152 } else {
1153 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1155 tcg_temp_free(addr);
1157 return;
1158 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1159 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1160 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1161 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1162 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1163 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1165 CHECK_FPU_ENABLED
1166 if (ctx->fpscr & FPSCR_PR) {
1167 TCGv_i64 fp0, fp1;
1169 if (ctx->opcode & 0x0110)
1170 break; /* illegal instruction */
1171 fp0 = tcg_temp_new_i64();
1172 fp1 = tcg_temp_new_i64();
1173 gen_load_fpr64(fp0, DREG(B11_8));
1174 gen_load_fpr64(fp1, DREG(B7_4));
1175 switch (ctx->opcode & 0xf00f) {
1176 case 0xf000: /* fadd Rm,Rn */
1177 gen_helper_fadd_DT(fp0, fp0, fp1);
1178 break;
1179 case 0xf001: /* fsub Rm,Rn */
1180 gen_helper_fsub_DT(fp0, fp0, fp1);
1181 break;
1182 case 0xf002: /* fmul Rm,Rn */
1183 gen_helper_fmul_DT(fp0, fp0, fp1);
1184 break;
1185 case 0xf003: /* fdiv Rm,Rn */
1186 gen_helper_fdiv_DT(fp0, fp0, fp1);
1187 break;
1188 case 0xf004: /* fcmp/eq Rm,Rn */
1189 gen_helper_fcmp_eq_DT(fp0, fp1);
1190 return;
1191 case 0xf005: /* fcmp/gt Rm,Rn */
1192 gen_helper_fcmp_gt_DT(fp0, fp1);
1193 return;
1195 gen_store_fpr64(fp0, DREG(B11_8));
1196 tcg_temp_free_i64(fp0);
1197 tcg_temp_free_i64(fp1);
1198 } else {
1199 switch (ctx->opcode & 0xf00f) {
1200 case 0xf000: /* fadd Rm,Rn */
1201 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1202 break;
1203 case 0xf001: /* fsub Rm,Rn */
1204 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1205 break;
1206 case 0xf002: /* fmul Rm,Rn */
1207 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1208 break;
1209 case 0xf003: /* fdiv Rm,Rn */
1210 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1211 break;
1212 case 0xf004: /* fcmp/eq Rm,Rn */
1213 gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1214 return;
1215 case 0xf005: /* fcmp/gt Rm,Rn */
1216 gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1217 return;
1221 return;
1222 case 0xf00e: /* fmac FR0,RM,Rn */
1224 CHECK_FPU_ENABLED
1225 if (ctx->fpscr & FPSCR_PR) {
1226 break; /* illegal instruction */
1227 } else {
1228 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)],
1229 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]);
1230 return;
1235 switch (ctx->opcode & 0xff00) {
1236 case 0xc900: /* and #imm,R0 */
1237 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1238 return;
1239 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1241 TCGv addr, val;
1242 addr = tcg_temp_new();
1243 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1244 val = tcg_temp_new();
1245 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1246 tcg_gen_andi_i32(val, val, B7_0);
1247 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1248 tcg_temp_free(val);
1249 tcg_temp_free(addr);
1251 return;
1252 case 0x8b00: /* bf label */
1253 CHECK_NOT_DELAY_SLOT
1254 gen_conditional_jump(ctx, ctx->pc + 2,
1255 ctx->pc + 4 + B7_0s * 2);
1256 ctx->bstate = BS_BRANCH;
1257 return;
1258 case 0x8f00: /* bf/s label */
1259 CHECK_NOT_DELAY_SLOT
1260 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1261 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1262 return;
1263 case 0x8900: /* bt label */
1264 CHECK_NOT_DELAY_SLOT
1265 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1266 ctx->pc + 2);
1267 ctx->bstate = BS_BRANCH;
1268 return;
1269 case 0x8d00: /* bt/s label */
1270 CHECK_NOT_DELAY_SLOT
1271 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1272 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1273 return;
1274 case 0x8800: /* cmp/eq #imm,R0 */
1275 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1276 return;
1277 case 0xc400: /* mov.b @(disp,GBR),R0 */
1279 TCGv addr = tcg_temp_new();
1280 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1281 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1282 tcg_temp_free(addr);
1284 return;
1285 case 0xc500: /* mov.w @(disp,GBR),R0 */
1287 TCGv addr = tcg_temp_new();
1288 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1289 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1290 tcg_temp_free(addr);
1292 return;
1293 case 0xc600: /* mov.l @(disp,GBR),R0 */
1295 TCGv addr = tcg_temp_new();
1296 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1297 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1298 tcg_temp_free(addr);
1300 return;
1301 case 0xc000: /* mov.b R0,@(disp,GBR) */
1303 TCGv addr = tcg_temp_new();
1304 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1305 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1306 tcg_temp_free(addr);
1308 return;
1309 case 0xc100: /* mov.w R0,@(disp,GBR) */
1311 TCGv addr = tcg_temp_new();
1312 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1313 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1314 tcg_temp_free(addr);
1316 return;
1317 case 0xc200: /* mov.l R0,@(disp,GBR) */
1319 TCGv addr = tcg_temp_new();
1320 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1321 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1322 tcg_temp_free(addr);
1324 return;
1325 case 0x8000: /* mov.b R0,@(disp,Rn) */
1327 TCGv addr = tcg_temp_new();
1328 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1329 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1330 tcg_temp_free(addr);
1332 return;
1333 case 0x8100: /* mov.w R0,@(disp,Rn) */
1335 TCGv addr = tcg_temp_new();
1336 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1337 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1338 tcg_temp_free(addr);
1340 return;
1341 case 0x8400: /* mov.b @(disp,Rn),R0 */
1343 TCGv addr = tcg_temp_new();
1344 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1345 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1346 tcg_temp_free(addr);
1348 return;
1349 case 0x8500: /* mov.w @(disp,Rn),R0 */
1351 TCGv addr = tcg_temp_new();
1352 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1353 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1354 tcg_temp_free(addr);
1356 return;
1357 case 0xc700: /* mova @(disp,PC),R0 */
1358 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1359 return;
1360 case 0xcb00: /* or #imm,R0 */
1361 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1362 return;
1363 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1365 TCGv addr, val;
1366 addr = tcg_temp_new();
1367 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1368 val = tcg_temp_new();
1369 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1370 tcg_gen_ori_i32(val, val, B7_0);
1371 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1372 tcg_temp_free(val);
1373 tcg_temp_free(addr);
1375 return;
1376 case 0xc300: /* trapa #imm */
1378 TCGv imm;
1379 CHECK_NOT_DELAY_SLOT
1380 imm = tcg_const_i32(B7_0);
1381 gen_helper_trapa(imm);
1382 tcg_temp_free(imm);
1383 ctx->bstate = BS_BRANCH;
1385 return;
1386 case 0xc800: /* tst #imm,R0 */
1388 TCGv val = tcg_temp_new();
1389 tcg_gen_andi_i32(val, REG(0), B7_0);
1390 gen_cmp_imm(TCG_COND_EQ, val, 0);
1391 tcg_temp_free(val);
1393 return;
1394 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1396 TCGv val = tcg_temp_new();
1397 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1398 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1399 tcg_gen_andi_i32(val, val, B7_0);
1400 gen_cmp_imm(TCG_COND_EQ, val, 0);
1401 tcg_temp_free(val);
1403 return;
1404 case 0xca00: /* xor #imm,R0 */
1405 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1406 return;
1407 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1409 TCGv addr, val;
1410 addr = tcg_temp_new();
1411 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1412 val = tcg_temp_new();
1413 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1414 tcg_gen_xori_i32(val, val, B7_0);
1415 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1416 tcg_temp_free(val);
1417 tcg_temp_free(addr);
1419 return;
1422 switch (ctx->opcode & 0xf08f) {
1423 case 0x408e: /* ldc Rm,Rn_BANK */
1424 CHECK_PRIVILEGED
1425 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1426 return;
1427 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1428 CHECK_PRIVILEGED
1429 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1430 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1431 return;
1432 case 0x0082: /* stc Rm_BANK,Rn */
1433 CHECK_PRIVILEGED
1434 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1435 return;
1436 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1437 CHECK_PRIVILEGED
1439 TCGv addr = tcg_temp_new();
1440 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1441 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1442 tcg_gen_mov_i32(REG(B11_8), addr);
1443 tcg_temp_free(addr);
1445 return;
1448 switch (ctx->opcode & 0xf0ff) {
1449 case 0x0023: /* braf Rn */
1450 CHECK_NOT_DELAY_SLOT
1451 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1452 ctx->flags |= DELAY_SLOT;
1453 ctx->delayed_pc = (uint32_t) - 1;
1454 return;
1455 case 0x0003: /* bsrf Rn */
1456 CHECK_NOT_DELAY_SLOT
1457 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1458 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1459 ctx->flags |= DELAY_SLOT;
1460 ctx->delayed_pc = (uint32_t) - 1;
1461 return;
1462 case 0x4015: /* cmp/pl Rn */
1463 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1464 return;
1465 case 0x4011: /* cmp/pz Rn */
1466 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1467 return;
1468 case 0x4010: /* dt Rn */
1469 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1470 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1471 return;
1472 case 0x402b: /* jmp @Rn */
1473 CHECK_NOT_DELAY_SLOT
1474 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1475 ctx->flags |= DELAY_SLOT;
1476 ctx->delayed_pc = (uint32_t) - 1;
1477 return;
1478 case 0x400b: /* jsr @Rn */
1479 CHECK_NOT_DELAY_SLOT
1480 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1481 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1482 ctx->flags |= DELAY_SLOT;
1483 ctx->delayed_pc = (uint32_t) - 1;
1484 return;
1485 case 0x400e: /* ldc Rm,SR */
1486 CHECK_PRIVILEGED
1487 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1488 ctx->bstate = BS_STOP;
1489 return;
1490 case 0x4007: /* ldc.l @Rm+,SR */
1491 CHECK_PRIVILEGED
1493 TCGv val = tcg_temp_new();
1494 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1495 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1496 tcg_temp_free(val);
1497 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1498 ctx->bstate = BS_STOP;
1500 return;
1501 case 0x0002: /* stc SR,Rn */
1502 CHECK_PRIVILEGED
1503 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1504 return;
1505 case 0x4003: /* stc SR,@-Rn */
1506 CHECK_PRIVILEGED
1508 TCGv addr = tcg_temp_new();
1509 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1510 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1511 tcg_gen_mov_i32(REG(B11_8), addr);
1512 tcg_temp_free(addr);
1514 return;
1515 #define LD(reg,ldnum,ldpnum,prechk) \
1516 case ldnum: \
1517 prechk \
1518 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1519 return; \
1520 case ldpnum: \
1521 prechk \
1522 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1523 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1524 return;
1525 #define ST(reg,stnum,stpnum,prechk) \
1526 case stnum: \
1527 prechk \
1528 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1529 return; \
1530 case stpnum: \
1531 prechk \
1533 TCGv addr = tcg_temp_new(); \
1534 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1535 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1536 tcg_gen_mov_i32(REG(B11_8), addr); \
1537 tcg_temp_free(addr); \
1539 return;
1540 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1541 LD(reg,ldnum,ldpnum,prechk) \
1542 ST(reg,stnum,stpnum,prechk)
1543 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1544 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1545 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1546 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1547 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1548 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1549 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1550 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1551 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1552 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1553 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1554 case 0x406a: /* lds Rm,FPSCR */
1555 CHECK_FPU_ENABLED
1556 gen_helper_ld_fpscr(REG(B11_8));
1557 ctx->bstate = BS_STOP;
1558 return;
1559 case 0x4066: /* lds.l @Rm+,FPSCR */
1560 CHECK_FPU_ENABLED
1562 TCGv addr = tcg_temp_new();
1563 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1564 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1565 gen_helper_ld_fpscr(addr);
1566 tcg_temp_free(addr);
1567 ctx->bstate = BS_STOP;
1569 return;
1570 case 0x006a: /* sts FPSCR,Rn */
1571 CHECK_FPU_ENABLED
1572 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1573 return;
1574 case 0x4062: /* sts FPSCR,@-Rn */
1575 CHECK_FPU_ENABLED
1577 TCGv addr, val;
1578 val = tcg_temp_new();
1579 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1580 addr = tcg_temp_new();
1581 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1582 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1583 tcg_gen_mov_i32(REG(B11_8), addr);
1584 tcg_temp_free(addr);
1585 tcg_temp_free(val);
1587 return;
1588 case 0x00c3: /* movca.l R0,@Rm */
1590 TCGv val = tcg_temp_new();
1591 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1592 gen_helper_movcal (REG(B11_8), val);
1593 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1595 ctx->has_movcal = 1;
1596 return;
1597 case 0x40a9:
1598 /* MOVUA.L @Rm,R0 (Rm) -> R0
1599 Load non-boundary-aligned data */
1600 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1601 return;
1602 case 0x40e9:
1603 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1604 Load non-boundary-aligned data */
1605 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1606 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1607 return;
1608 case 0x0029: /* movt Rn */
1609 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1610 return;
1611 case 0x0073:
1612 /* MOVCO.L
1613 LDST -> T
1614 If (T == 1) R0 -> (Rn)
1615 0 -> LDST
1617 if (ctx->features & SH_FEATURE_SH4A) {
1618 int label = gen_new_label();
1619 gen_clr_t();
1620 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1621 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1622 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1623 gen_set_label(label);
1624 tcg_gen_movi_i32(cpu_ldst, 0);
1625 return;
1626 } else
1627 break;
1628 case 0x0063:
1629 /* MOVLI.L @Rm,R0
1630 1 -> LDST
1631 (Rm) -> R0
1632 When interrupt/exception
1633 occurred 0 -> LDST
1635 if (ctx->features & SH_FEATURE_SH4A) {
1636 tcg_gen_movi_i32(cpu_ldst, 0);
1637 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1638 tcg_gen_movi_i32(cpu_ldst, 1);
1639 return;
1640 } else
1641 break;
1642 case 0x0093: /* ocbi @Rn */
1644 gen_helper_ocbi (REG(B11_8));
1646 return;
1647 case 0x00a3: /* ocbp @Rn */
1649 TCGv dummy = tcg_temp_new();
1650 tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
1651 tcg_temp_free(dummy);
1653 return;
1654 case 0x00b3: /* ocbwb @Rn */
1656 TCGv dummy = tcg_temp_new();
1657 tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
1658 tcg_temp_free(dummy);
1660 return;
1661 case 0x0083: /* pref @Rn */
1662 return;
1663 case 0x00d3: /* prefi @Rn */
1664 if (ctx->features & SH_FEATURE_SH4A)
1665 return;
1666 else
1667 break;
1668 case 0x00e3: /* icbi @Rn */
1669 if (ctx->features & SH_FEATURE_SH4A)
1670 return;
1671 else
1672 break;
1673 case 0x00ab: /* synco */
1674 if (ctx->features & SH_FEATURE_SH4A)
1675 return;
1676 else
1677 break;
1678 case 0x4024: /* rotcl Rn */
1680 TCGv tmp = tcg_temp_new();
1681 tcg_gen_mov_i32(tmp, cpu_sr);
1682 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1683 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1684 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1685 tcg_temp_free(tmp);
1687 return;
1688 case 0x4025: /* rotcr Rn */
1690 TCGv tmp = tcg_temp_new();
1691 tcg_gen_mov_i32(tmp, cpu_sr);
1692 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1693 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1694 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1695 tcg_temp_free(tmp);
1697 return;
1698 case 0x4004: /* rotl Rn */
1699 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1700 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1701 gen_copy_bit_i32(REG(B11_8), 0, cpu_sr, 0);
1702 return;
1703 case 0x4005: /* rotr Rn */
1704 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1705 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1706 gen_copy_bit_i32(REG(B11_8), 31, cpu_sr, 0);
1707 return;
1708 case 0x4000: /* shll Rn */
1709 case 0x4020: /* shal Rn */
1710 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1711 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1712 return;
1713 case 0x4021: /* shar Rn */
1714 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1715 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1716 return;
1717 case 0x4001: /* shlr Rn */
1718 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1719 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1720 return;
1721 case 0x4008: /* shll2 Rn */
1722 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1723 return;
1724 case 0x4018: /* shll8 Rn */
1725 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1726 return;
1727 case 0x4028: /* shll16 Rn */
1728 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1729 return;
1730 case 0x4009: /* shlr2 Rn */
1731 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1732 return;
1733 case 0x4019: /* shlr8 Rn */
1734 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1735 return;
1736 case 0x4029: /* shlr16 Rn */
1737 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1738 return;
1739 case 0x401b: /* tas.b @Rn */
1741 TCGv addr, val;
1742 addr = tcg_temp_local_new();
1743 tcg_gen_mov_i32(addr, REG(B11_8));
1744 val = tcg_temp_local_new();
1745 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1746 gen_cmp_imm(TCG_COND_EQ, val, 0);
1747 tcg_gen_ori_i32(val, val, 0x80);
1748 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1749 tcg_temp_free(val);
1750 tcg_temp_free(addr);
1752 return;
1753 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1754 CHECK_FPU_ENABLED
1755 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1756 return;
1757 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1758 CHECK_FPU_ENABLED
1759 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1760 return;
1761 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1762 CHECK_FPU_ENABLED
1763 if (ctx->fpscr & FPSCR_PR) {
1764 TCGv_i64 fp;
1765 if (ctx->opcode & 0x0100)
1766 break; /* illegal instruction */
1767 fp = tcg_temp_new_i64();
1768 gen_helper_float_DT(fp, cpu_fpul);
1769 gen_store_fpr64(fp, DREG(B11_8));
1770 tcg_temp_free_i64(fp);
1772 else {
1773 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul);
1775 return;
1776 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1777 CHECK_FPU_ENABLED
1778 if (ctx->fpscr & FPSCR_PR) {
1779 TCGv_i64 fp;
1780 if (ctx->opcode & 0x0100)
1781 break; /* illegal instruction */
1782 fp = tcg_temp_new_i64();
1783 gen_load_fpr64(fp, DREG(B11_8));
1784 gen_helper_ftrc_DT(cpu_fpul, fp);
1785 tcg_temp_free_i64(fp);
1787 else {
1788 gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1790 return;
1791 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1792 CHECK_FPU_ENABLED
1794 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1796 return;
1797 case 0xf05d: /* fabs FRn/DRn */
1798 CHECK_FPU_ENABLED
1799 if (ctx->fpscr & FPSCR_PR) {
1800 if (ctx->opcode & 0x0100)
1801 break; /* illegal instruction */
1802 TCGv_i64 fp = tcg_temp_new_i64();
1803 gen_load_fpr64(fp, DREG(B11_8));
1804 gen_helper_fabs_DT(fp, fp);
1805 gen_store_fpr64(fp, DREG(B11_8));
1806 tcg_temp_free_i64(fp);
1807 } else {
1808 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1810 return;
1811 case 0xf06d: /* fsqrt FRn */
1812 CHECK_FPU_ENABLED
1813 if (ctx->fpscr & FPSCR_PR) {
1814 if (ctx->opcode & 0x0100)
1815 break; /* illegal instruction */
1816 TCGv_i64 fp = tcg_temp_new_i64();
1817 gen_load_fpr64(fp, DREG(B11_8));
1818 gen_helper_fsqrt_DT(fp, fp);
1819 gen_store_fpr64(fp, DREG(B11_8));
1820 tcg_temp_free_i64(fp);
1821 } else {
1822 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1824 return;
1825 case 0xf07d: /* fsrra FRn */
1826 CHECK_FPU_ENABLED
1827 break;
1828 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1829 CHECK_FPU_ENABLED
1830 if (!(ctx->fpscr & FPSCR_PR)) {
1831 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1833 return;
1834 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1835 CHECK_FPU_ENABLED
1836 if (!(ctx->fpscr & FPSCR_PR)) {
1837 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1839 return;
1840 case 0xf0ad: /* fcnvsd FPUL,DRn */
1841 CHECK_FPU_ENABLED
1843 TCGv_i64 fp = tcg_temp_new_i64();
1844 gen_helper_fcnvsd_FT_DT(fp, cpu_fpul);
1845 gen_store_fpr64(fp, DREG(B11_8));
1846 tcg_temp_free_i64(fp);
1848 return;
1849 case 0xf0bd: /* fcnvds DRn,FPUL */
1850 CHECK_FPU_ENABLED
1852 TCGv_i64 fp = tcg_temp_new_i64();
1853 gen_load_fpr64(fp, DREG(B11_8));
1854 gen_helper_fcnvds_DT_FT(cpu_fpul, fp);
1855 tcg_temp_free_i64(fp);
1857 return;
1858 case 0xf0ed: /* fipr FVm,FVn */
1859 CHECK_FPU_ENABLED
1860 if ((ctx->fpscr & FPSCR_PR) == 0) {
1861 TCGv m, n;
1862 m = tcg_const_i32((ctx->opcode >> 16) & 3);
1863 n = tcg_const_i32((ctx->opcode >> 18) & 3);
1864 gen_helper_fipr(m, n);
1865 tcg_temp_free(m);
1866 tcg_temp_free(n);
1867 return;
1869 break;
1870 case 0xf0fd: /* ftrv XMTRX,FVn */
1871 CHECK_FPU_ENABLED
1872 if ((ctx->opcode & 0x0300) == 0x0100 &&
1873 (ctx->fpscr & FPSCR_PR) == 0) {
1874 TCGv n;
1875 n = tcg_const_i32((ctx->opcode >> 18) & 3);
1876 gen_helper_ftrv(n);
1877 tcg_temp_free(n);
1878 return;
1880 break;
1882 #if 0
1883 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1884 ctx->opcode, ctx->pc);
1885 fflush(stderr);
1886 #endif
1887 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1888 gen_helper_raise_slot_illegal_instruction();
1889 } else {
1890 gen_helper_raise_illegal_instruction();
1892 ctx->bstate = BS_EXCP;
1895 static void decode_opc(DisasContext * ctx)
1897 uint32_t old_flags = ctx->flags;
1899 _decode_opc(ctx);
1901 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1902 if (ctx->flags & DELAY_SLOT_CLEARME) {
1903 gen_store_flags(0);
1904 } else {
1905 /* go out of the delay slot */
1906 uint32_t new_flags = ctx->flags;
1907 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1908 gen_store_flags(new_flags);
1910 ctx->flags = 0;
1911 ctx->bstate = BS_BRANCH;
1912 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1913 gen_delayed_conditional_jump(ctx);
1914 } else if (old_flags & DELAY_SLOT) {
1915 gen_jump(ctx);
1920 /* go into a delay slot */
1921 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1922 gen_store_flags(ctx->flags);
1925 static inline void
1926 gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
1927 int search_pc)
1929 DisasContext ctx;
1930 target_ulong pc_start;
1931 static uint16_t *gen_opc_end;
1932 CPUBreakpoint *bp;
1933 int i, ii;
1934 int num_insns;
1935 int max_insns;
1937 pc_start = tb->pc;
1938 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1939 ctx.pc = pc_start;
1940 ctx.flags = (uint32_t)tb->flags;
1941 ctx.bstate = BS_NONE;
1942 ctx.sr = env->sr;
1943 ctx.fpscr = env->fpscr;
1944 ctx.memidx = (env->sr & SR_MD) == 0 ? 1 : 0;
1945 /* We don't know if the delayed pc came from a dynamic or static branch,
1946 so assume it is a dynamic branch. */
1947 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1948 ctx.tb = tb;
1949 ctx.singlestep_enabled = env->singlestep_enabled;
1950 ctx.features = env->features;
1951 ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);
1953 ii = -1;
1954 num_insns = 0;
1955 max_insns = tb->cflags & CF_COUNT_MASK;
1956 if (max_insns == 0)
1957 max_insns = CF_COUNT_MASK;
1958 gen_icount_start();
1959 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1960 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1961 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1962 if (ctx.pc == bp->pc) {
1963 /* We have hit a breakpoint - make sure PC is up-to-date */
1964 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1965 gen_helper_debug();
1966 ctx.bstate = BS_EXCP;
1967 break;
1971 if (search_pc) {
1972 i = gen_opc_ptr - gen_opc_buf;
1973 if (ii < i) {
1974 ii++;
1975 while (ii < i)
1976 gen_opc_instr_start[ii++] = 0;
1978 gen_opc_pc[ii] = ctx.pc;
1979 gen_opc_hflags[ii] = ctx.flags;
1980 gen_opc_instr_start[ii] = 1;
1981 gen_opc_icount[ii] = num_insns;
1983 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1984 gen_io_start();
1985 #if 0
1986 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1987 fflush(stderr);
1988 #endif
1989 ctx.opcode = lduw_code(ctx.pc);
1990 decode_opc(&ctx);
1991 num_insns++;
1992 ctx.pc += 2;
1993 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1994 break;
1995 if (env->singlestep_enabled)
1996 break;
1997 if (num_insns >= max_insns)
1998 break;
1999 if (singlestep)
2000 break;
2002 if (tb->cflags & CF_LAST_IO)
2003 gen_io_end();
2004 if (env->singlestep_enabled) {
2005 tcg_gen_movi_i32(cpu_pc, ctx.pc);
2006 gen_helper_debug();
2007 } else {
2008 switch (ctx.bstate) {
2009 case BS_STOP:
2010 /* gen_op_interrupt_restart(); */
2011 /* fall through */
2012 case BS_NONE:
2013 if (ctx.flags) {
2014 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
2016 gen_goto_tb(&ctx, 0, ctx.pc);
2017 break;
2018 case BS_EXCP:
2019 /* gen_op_interrupt_restart(); */
2020 tcg_gen_exit_tb(0);
2021 break;
2022 case BS_BRANCH:
2023 default:
2024 break;
2028 gen_icount_end(tb, num_insns);
2029 *gen_opc_ptr = INDEX_op_end;
2030 if (search_pc) {
2031 i = gen_opc_ptr - gen_opc_buf;
2032 ii++;
2033 while (ii <= i)
2034 gen_opc_instr_start[ii++] = 0;
2035 } else {
2036 tb->size = ctx.pc - pc_start;
2037 tb->icount = num_insns;
2040 #ifdef DEBUG_DISAS
2041 #ifdef SH4_DEBUG_DISAS
2042 qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
2043 #endif
2044 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2045 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2046 log_target_disas(pc_start, ctx.pc - pc_start, 0);
2047 qemu_log("\n");
2049 #endif
2052 void gen_intermediate_code(CPUState * env, struct TranslationBlock *tb)
2054 gen_intermediate_code_internal(env, tb, 0);
2057 void gen_intermediate_code_pc(CPUState * env, struct TranslationBlock *tb)
2059 gen_intermediate_code_internal(env, tb, 1);
2062 void gen_pc_load(CPUState *env, TranslationBlock *tb,
2063 unsigned long searched_pc, int pc_pos, void *puc)
2065 env->pc = gen_opc_pc[pc_pos];
2066 env->flags = gen_opc_hflags[pc_pos];