target-sh4: add fipr instruction
[qemu.git] / target-sh4 / translate.c
blob: 557550f8e1d64e1cd24fdb0525727daa6ecacc3d
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
25 #define DEBUG_DISAS
26 #define SH4_DEBUG_DISAS
27 //#define SH4_SINGLE_STEP
29 #include "cpu.h"
30 #include "exec-all.h"
31 #include "disas.h"
32 #include "tcg-op.h"
33 #include "qemu-common.h"
35 #include "helper.h"
36 #define GEN_HELPER 1
37 #include "helper.h"
39 typedef struct DisasContext {
40 struct TranslationBlock *tb;
41 target_ulong pc;
42 uint32_t sr;
43 uint32_t fpscr;
44 uint16_t opcode;
45 uint32_t flags;
46 int bstate;
47 int memidx;
48 uint32_t delayed_pc;
49 int singlestep_enabled;
50 uint32_t features;
51 int has_movcal;
52 } DisasContext;
/* In user-mode emulation everything runs unprivileged; otherwise
   privilege is the SR.MD bit of the translated context. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->sr & SR_MD))
#endif
/* End-of-translation-block states (ctx->bstate).  Reconstructed: the
   closing comment and `};` were stripped by the scrape. */
enum {
    BS_NONE = 0,   /* We go out of the TB without reaching a branch or an
                    * exception condition */
    BS_STOP = 1,   /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3,   /* We reached an exception condition */
};
69 /* global register indexes */
70 static TCGv_ptr cpu_env;
71 static TCGv cpu_gregs[24];
72 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
73 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
74 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
75 static TCGv cpu_fregs[32];
77 /* internal register indexes */
78 static TCGv cpu_flags, cpu_delayed_pc;
80 static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
82 #include "gen-icount.h"
84 static void sh4_translate_init(void)
86 int i;
87 static int done_init = 0;
88 static const char * const gregnames[24] = {
89 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
90 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
91 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
92 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
93 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
95 static const char * const fregnames[32] = {
96 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
97 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
98 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
99 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
100 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
101 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
102 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
103 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
106 if (done_init)
107 return;
109 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
111 for (i = 0; i < 24; i++)
112 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
113 offsetof(CPUState, gregs[i]),
114 gregnames[i]);
116 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, pc), "PC");
118 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, sr), "SR");
120 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUState, ssr), "SSR");
122 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUState, spc), "SPC");
124 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, gbr), "GBR");
126 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, vbr), "VBR");
128 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
129 offsetof(CPUState, sgr), "SGR");
130 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
131 offsetof(CPUState, dbr), "DBR");
132 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
133 offsetof(CPUState, mach), "MACH");
134 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
135 offsetof(CPUState, macl), "MACL");
136 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
137 offsetof(CPUState, pr), "PR");
138 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
139 offsetof(CPUState, fpscr), "FPSCR");
140 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
141 offsetof(CPUState, fpul), "FPUL");
143 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
144 offsetof(CPUState, flags), "_flags_");
145 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
146 offsetof(CPUState, delayed_pc),
147 "_delayed_pc_");
148 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
149 offsetof(CPUState, ldst), "_ldst_");
151 for (i = 0; i < 32; i++)
152 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
153 offsetof(CPUState, fregs[i]),
154 fregnames[i]);
156 /* register helpers */
157 #define GEN_HELPER 2
158 #include "helper.h"
160 done_init = 1;
163 void cpu_dump_state(CPUState * env, FILE * f,
164 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
165 int flags)
167 int i;
168 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
169 env->pc, env->sr, env->pr, env->fpscr);
170 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
171 env->spc, env->ssr, env->gbr, env->vbr);
172 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
173 env->sgr, env->dbr, env->delayed_pc, env->fpul);
174 for (i = 0; i < 24; i += 4) {
175 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
176 i, env->gregs[i], i + 1, env->gregs[i + 1],
177 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
179 if (env->flags & DELAY_SLOT) {
180 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
181 env->delayed_pc);
182 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
183 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
184 env->delayed_pc);
188 static void cpu_sh4_reset(CPUSH4State * env)
190 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
191 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
192 log_cpu_state(env, 0);
195 #if defined(CONFIG_USER_ONLY)
196 env->sr = 0;
197 #else
198 env->sr = SR_MD | SR_RB | SR_BL | SR_I3 | SR_I2 | SR_I1 | SR_I0;
199 #endif
200 env->vbr = 0;
201 env->pc = 0xA0000000;
202 #if defined(CONFIG_USER_ONLY)
203 env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
204 set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
205 #else
206 env->fpscr = FPSCR_DN | FPSCR_RM_ZERO; /* CPU reset value according to SH4 manual */
207 set_float_rounding_mode(float_round_to_zero, &env->fp_status);
208 set_flush_to_zero(1, &env->fp_status);
209 #endif
210 set_default_nan_mode(1, &env->fp_status);
211 env->mmucr = 0;
214 typedef struct {
215 const char *name;
216 int id;
217 uint32_t pvr;
218 uint32_t prr;
219 uint32_t cvr;
220 uint32_t features;
221 } sh4_def_t;
223 static sh4_def_t sh4_defs[] = {
225 .name = "SH7750R",
226 .id = SH_CPU_SH7750R,
227 .pvr = 0x00050000,
228 .prr = 0x00000100,
229 .cvr = 0x00110000,
230 .features = SH_FEATURE_BCR3_AND_BCR4,
231 }, {
232 .name = "SH7751R",
233 .id = SH_CPU_SH7751R,
234 .pvr = 0x04050005,
235 .prr = 0x00000113,
236 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
237 .features = SH_FEATURE_BCR3_AND_BCR4,
238 }, {
239 .name = "SH7785",
240 .id = SH_CPU_SH7785,
241 .pvr = 0x10300700,
242 .prr = 0x00000200,
243 .cvr = 0x71440211,
244 .features = SH_FEATURE_SH4A,
248 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
250 int i;
252 if (strcasecmp(name, "any") == 0)
253 return &sh4_defs[0];
255 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
256 if (strcasecmp(name, sh4_defs[i].name) == 0)
257 return &sh4_defs[i];
259 return NULL;
262 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
264 int i;
266 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
267 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
270 static void cpu_sh4_register(CPUSH4State *env, const sh4_def_t *def)
272 env->pvr = def->pvr;
273 env->prr = def->prr;
274 env->cvr = def->cvr;
275 env->id = def->id;
278 CPUSH4State *cpu_sh4_init(const char *cpu_model)
280 CPUSH4State *env;
281 const sh4_def_t *def;
283 def = cpu_sh4_find_by_name(cpu_model);
284 if (!def)
285 return NULL;
286 env = qemu_mallocz(sizeof(CPUSH4State));
287 env->features = def->features;
288 cpu_exec_init(env);
289 env->movcal_backup_tail = &(env->movcal_backup);
290 sh4_translate_init();
291 env->cpu_model_str = cpu_model;
292 cpu_sh4_reset(env);
293 cpu_sh4_register(env, def);
294 tlb_flush(env, 1);
295 qemu_init_vcpu(env);
296 return env;
299 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
301 TranslationBlock *tb;
302 tb = ctx->tb;
304 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
305 !ctx->singlestep_enabled) {
306 /* Use a direct jump if in same page and singlestep not enabled */
307 tcg_gen_goto_tb(n);
308 tcg_gen_movi_i32(cpu_pc, dest);
309 tcg_gen_exit_tb((long) tb + n);
310 } else {
311 tcg_gen_movi_i32(cpu_pc, dest);
312 if (ctx->singlestep_enabled)
313 gen_helper_debug();
314 tcg_gen_exit_tb(0);
318 static void gen_jump(DisasContext * ctx)
320 if (ctx->delayed_pc == (uint32_t) - 1) {
321 /* Target is not statically known, it comes necessarily from a
322 delayed jump as immediate jump are conditinal jumps */
323 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
324 if (ctx->singlestep_enabled)
325 gen_helper_debug();
326 tcg_gen_exit_tb(0);
327 } else {
328 gen_goto_tb(ctx, 0, ctx->delayed_pc);
332 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
334 TCGv sr;
335 int label = gen_new_label();
336 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
337 sr = tcg_temp_new();
338 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
339 tcg_gen_brcondi_i32(TCG_COND_NE, sr, t ? SR_T : 0, label);
340 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
341 gen_set_label(label);
344 /* Immediate conditional jump (bt or bf) */
345 static void gen_conditional_jump(DisasContext * ctx,
346 target_ulong ift, target_ulong ifnott)
348 int l1;
349 TCGv sr;
351 l1 = gen_new_label();
352 sr = tcg_temp_new();
353 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
354 tcg_gen_brcondi_i32(TCG_COND_EQ, sr, SR_T, l1);
355 gen_goto_tb(ctx, 0, ifnott);
356 gen_set_label(l1);
357 gen_goto_tb(ctx, 1, ift);
360 /* Delayed conditional jump (bt or bf) */
361 static void gen_delayed_conditional_jump(DisasContext * ctx)
363 int l1;
364 TCGv ds;
366 l1 = gen_new_label();
367 ds = tcg_temp_new();
368 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
369 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, DELAY_SLOT_TRUE, l1);
370 gen_goto_tb(ctx, 1, ctx->pc + 2);
371 gen_set_label(l1);
372 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
373 gen_jump(ctx);
376 static inline void gen_set_t(void)
378 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
381 static inline void gen_clr_t(void)
383 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
386 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
388 int label1 = gen_new_label();
389 int label2 = gen_new_label();
390 tcg_gen_brcond_i32(cond, t1, t0, label1);
391 gen_clr_t();
392 tcg_gen_br(label2);
393 gen_set_label(label1);
394 gen_set_t();
395 gen_set_label(label2);
398 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
400 int label1 = gen_new_label();
401 int label2 = gen_new_label();
402 tcg_gen_brcondi_i32(cond, t0, imm, label1);
403 gen_clr_t();
404 tcg_gen_br(label2);
405 gen_set_label(label1);
406 gen_set_t();
407 gen_set_label(label2);
410 static inline void gen_store_flags(uint32_t flags)
412 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
413 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
416 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
418 TCGv tmp = tcg_temp_new();
420 p0 &= 0x1f;
421 p1 &= 0x1f;
423 tcg_gen_andi_i32(tmp, t1, (1 << p1));
424 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
425 if (p0 < p1)
426 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
427 else if (p0 > p1)
428 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
429 tcg_gen_or_i32(t0, t0, tmp);
431 tcg_temp_free(tmp);
434 static inline void gen_load_fpr64(TCGv_i64 t, int reg)
436 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
439 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
441 TCGv_i32 tmp = tcg_temp_new_i32();
442 tcg_gen_trunc_i64_i32(tmp, t);
443 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
444 tcg_gen_shri_i64(t, t, 32);
445 tcg_gen_trunc_i64_i32(tmp, t);
446 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
447 tcg_temp_free_i32(tmp);
/* Instruction-field extraction and register-selection macros; all
   assume a DisasContext *ctx in scope. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* Bank 1 is used for R0..R7 when SR.MD and SR.RB are both set. */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
		(cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* ALTREG selects the opposite bank of REG for R0..R7. */
#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Decoder guard macros: each raises the appropriate exception, marks
   the block as BS_EXCP and returns from _decode_opc.  Reconstructed:
   the scrape dropped the brace lines of the multi-line macro bodies. */
#define CHECK_NOT_DELAY_SLOT                                  \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
      gen_helper_raise_slot_illegal_instruction();            \
      ctx->bstate = BS_EXCP;                                  \
      return;                                                 \
  }

#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_illegal_instruction();          \
      } else {                                                  \
          gen_helper_raise_illegal_instruction();               \
      }                                                         \
      ctx->bstate = BS_EXCP;                                    \
      return;                                                   \
  }

#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & SR_FD) {                                     \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable();                  \
      } else {                                                  \
          gen_helper_raise_fpu_disable();                       \
      }                                                         \
      ctx->bstate = BS_EXCP;                                    \
      return;                                                   \
  }
504 static void _decode_opc(DisasContext * ctx)
506 /* This code tries to make movcal emulation sufficiently
507 accurate for Linux purposes. This instruction writes
508 memory, and prior to that, always allocates a cache line.
509 It is used in two contexts:
510 - in memcpy, where data is copied in blocks, the first write
511 of to a block uses movca.l for performance.
512 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
513 to flush the cache. Here, the data written by movcal.l is never
514 written to memory, and the data written is just bogus.
516 To simulate this, we simulate movcal.l, we store the value to memory,
517 but we also remember the previous content. If we see ocbi, we check
518 if movcal.l for that address was done previously. If so, the write should
519 not have hit the memory, so we restore the previous content.
520 When we see an instruction that is neither movca.l
521 nor ocbi, the previous content is discarded.
523 To optimize, we only try to flush stores when we're at the start of
524 TB, or if we already saw movca.l in this TB and did not flush stores
525 yet. */
526 if (ctx->has_movcal)
528 int opcode = ctx->opcode & 0xf0ff;
529 if (opcode != 0x0093 /* ocbi */
530 && opcode != 0x00c3 /* movca.l */)
532 gen_helper_discard_movcal_backup ();
533 ctx->has_movcal = 0;
537 #if 0
538 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
539 #endif
541 switch (ctx->opcode) {
542 case 0x0019: /* div0u */
543 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
544 return;
545 case 0x000b: /* rts */
546 CHECK_NOT_DELAY_SLOT
547 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
548 ctx->flags |= DELAY_SLOT;
549 ctx->delayed_pc = (uint32_t) - 1;
550 return;
551 case 0x0028: /* clrmac */
552 tcg_gen_movi_i32(cpu_mach, 0);
553 tcg_gen_movi_i32(cpu_macl, 0);
554 return;
555 case 0x0048: /* clrs */
556 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
557 return;
558 case 0x0008: /* clrt */
559 gen_clr_t();
560 return;
561 case 0x0038: /* ldtlb */
562 CHECK_PRIVILEGED
563 gen_helper_ldtlb();
564 return;
565 case 0x002b: /* rte */
566 CHECK_PRIVILEGED
567 CHECK_NOT_DELAY_SLOT
568 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
569 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
570 ctx->flags |= DELAY_SLOT;
571 ctx->delayed_pc = (uint32_t) - 1;
572 return;
573 case 0x0058: /* sets */
574 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
575 return;
576 case 0x0018: /* sett */
577 gen_set_t();
578 return;
579 case 0xfbfd: /* frchg */
580 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
581 ctx->bstate = BS_STOP;
582 return;
583 case 0xf3fd: /* fschg */
584 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
585 ctx->bstate = BS_STOP;
586 return;
587 case 0x0009: /* nop */
588 return;
589 case 0x001b: /* sleep */
590 CHECK_PRIVILEGED
591 gen_helper_sleep(tcg_const_i32(ctx->pc + 2));
592 return;
595 switch (ctx->opcode & 0xf000) {
596 case 0x1000: /* mov.l Rm,@(disp,Rn) */
598 TCGv addr = tcg_temp_new();
599 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
600 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
601 tcg_temp_free(addr);
603 return;
604 case 0x5000: /* mov.l @(disp,Rm),Rn */
606 TCGv addr = tcg_temp_new();
607 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
608 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
609 tcg_temp_free(addr);
611 return;
612 case 0xe000: /* mov #imm,Rn */
613 tcg_gen_movi_i32(REG(B11_8), B7_0s);
614 return;
615 case 0x9000: /* mov.w @(disp,PC),Rn */
617 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
618 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
619 tcg_temp_free(addr);
621 return;
622 case 0xd000: /* mov.l @(disp,PC),Rn */
624 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
625 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
626 tcg_temp_free(addr);
628 return;
629 case 0x7000: /* add #imm,Rn */
630 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
631 return;
632 case 0xa000: /* bra disp */
633 CHECK_NOT_DELAY_SLOT
634 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
635 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
636 ctx->flags |= DELAY_SLOT;
637 return;
638 case 0xb000: /* bsr disp */
639 CHECK_NOT_DELAY_SLOT
640 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
641 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
642 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
643 ctx->flags |= DELAY_SLOT;
644 return;
647 switch (ctx->opcode & 0xf00f) {
648 case 0x6003: /* mov Rm,Rn */
649 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
650 return;
651 case 0x2000: /* mov.b Rm,@Rn */
652 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
653 return;
654 case 0x2001: /* mov.w Rm,@Rn */
655 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
656 return;
657 case 0x2002: /* mov.l Rm,@Rn */
658 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
659 return;
660 case 0x6000: /* mov.b @Rm,Rn */
661 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
662 return;
663 case 0x6001: /* mov.w @Rm,Rn */
664 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
665 return;
666 case 0x6002: /* mov.l @Rm,Rn */
667 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
668 return;
669 case 0x2004: /* mov.b Rm,@-Rn */
671 TCGv addr = tcg_temp_new();
672 tcg_gen_subi_i32(addr, REG(B11_8), 1);
673 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
674 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
675 tcg_temp_free(addr);
677 return;
678 case 0x2005: /* mov.w Rm,@-Rn */
680 TCGv addr = tcg_temp_new();
681 tcg_gen_subi_i32(addr, REG(B11_8), 2);
682 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
683 tcg_gen_mov_i32(REG(B11_8), addr);
684 tcg_temp_free(addr);
686 return;
687 case 0x2006: /* mov.l Rm,@-Rn */
689 TCGv addr = tcg_temp_new();
690 tcg_gen_subi_i32(addr, REG(B11_8), 4);
691 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
692 tcg_gen_mov_i32(REG(B11_8), addr);
694 return;
695 case 0x6004: /* mov.b @Rm+,Rn */
696 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
697 if ( B11_8 != B7_4 )
698 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
699 return;
700 case 0x6005: /* mov.w @Rm+,Rn */
701 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
702 if ( B11_8 != B7_4 )
703 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
704 return;
705 case 0x6006: /* mov.l @Rm+,Rn */
706 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
707 if ( B11_8 != B7_4 )
708 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
709 return;
710 case 0x0004: /* mov.b Rm,@(R0,Rn) */
712 TCGv addr = tcg_temp_new();
713 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
714 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
715 tcg_temp_free(addr);
717 return;
718 case 0x0005: /* mov.w Rm,@(R0,Rn) */
720 TCGv addr = tcg_temp_new();
721 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
722 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
723 tcg_temp_free(addr);
725 return;
726 case 0x0006: /* mov.l Rm,@(R0,Rn) */
728 TCGv addr = tcg_temp_new();
729 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
730 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
731 tcg_temp_free(addr);
733 return;
734 case 0x000c: /* mov.b @(R0,Rm),Rn */
736 TCGv addr = tcg_temp_new();
737 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
738 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
739 tcg_temp_free(addr);
741 return;
742 case 0x000d: /* mov.w @(R0,Rm),Rn */
744 TCGv addr = tcg_temp_new();
745 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
746 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
747 tcg_temp_free(addr);
749 return;
750 case 0x000e: /* mov.l @(R0,Rm),Rn */
752 TCGv addr = tcg_temp_new();
753 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
754 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
755 tcg_temp_free(addr);
757 return;
758 case 0x6008: /* swap.b Rm,Rn */
760 TCGv high, low;
761 high = tcg_temp_new();
762 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
763 low = tcg_temp_new();
764 tcg_gen_ext16u_i32(low, REG(B7_4));
765 tcg_gen_bswap16_i32(low, low);
766 tcg_gen_or_i32(REG(B11_8), high, low);
767 tcg_temp_free(low);
768 tcg_temp_free(high);
770 return;
771 case 0x6009: /* swap.w Rm,Rn */
773 TCGv high, low;
774 high = tcg_temp_new();
775 tcg_gen_shli_i32(high, REG(B7_4), 16);
776 low = tcg_temp_new();
777 tcg_gen_shri_i32(low, REG(B7_4), 16);
778 tcg_gen_ext16u_i32(low, low);
779 tcg_gen_or_i32(REG(B11_8), high, low);
780 tcg_temp_free(low);
781 tcg_temp_free(high);
783 return;
784 case 0x200d: /* xtrct Rm,Rn */
786 TCGv high, low;
787 high = tcg_temp_new();
788 tcg_gen_shli_i32(high, REG(B7_4), 16);
789 low = tcg_temp_new();
790 tcg_gen_shri_i32(low, REG(B11_8), 16);
791 tcg_gen_ext16u_i32(low, low);
792 tcg_gen_or_i32(REG(B11_8), high, low);
793 tcg_temp_free(low);
794 tcg_temp_free(high);
796 return;
797 case 0x300c: /* add Rm,Rn */
798 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
799 return;
800 case 0x300e: /* addc Rm,Rn */
801 gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8));
802 return;
803 case 0x300f: /* addv Rm,Rn */
804 gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8));
805 return;
806 case 0x2009: /* and Rm,Rn */
807 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
808 return;
809 case 0x3000: /* cmp/eq Rm,Rn */
810 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
811 return;
812 case 0x3003: /* cmp/ge Rm,Rn */
813 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
814 return;
815 case 0x3007: /* cmp/gt Rm,Rn */
816 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
817 return;
818 case 0x3006: /* cmp/hi Rm,Rn */
819 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
820 return;
821 case 0x3002: /* cmp/hs Rm,Rn */
822 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
823 return;
824 case 0x200c: /* cmp/str Rm,Rn */
826 int label1 = gen_new_label();
827 int label2 = gen_new_label();
828 TCGv cmp1 = tcg_temp_local_new();
829 TCGv cmp2 = tcg_temp_local_new();
830 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
831 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
832 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
833 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
834 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
835 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
836 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
837 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
838 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
839 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
840 tcg_gen_br(label2);
841 gen_set_label(label1);
842 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
843 gen_set_label(label2);
844 tcg_temp_free(cmp2);
845 tcg_temp_free(cmp1);
847 return;
848 case 0x2007: /* div0s Rm,Rn */
850 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
851 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
852 TCGv val = tcg_temp_new();
853 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
854 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
855 tcg_temp_free(val);
857 return;
858 case 0x3004: /* div1 Rm,Rn */
859 gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8));
860 return;
861 case 0x300d: /* dmuls.l Rm,Rn */
863 TCGv_i64 tmp1 = tcg_temp_new_i64();
864 TCGv_i64 tmp2 = tcg_temp_new_i64();
866 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
867 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
868 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
869 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
870 tcg_gen_shri_i64(tmp1, tmp1, 32);
871 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
873 tcg_temp_free_i64(tmp2);
874 tcg_temp_free_i64(tmp1);
876 return;
877 case 0x3005: /* dmulu.l Rm,Rn */
879 TCGv_i64 tmp1 = tcg_temp_new_i64();
880 TCGv_i64 tmp2 = tcg_temp_new_i64();
882 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
883 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
884 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
885 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
886 tcg_gen_shri_i64(tmp1, tmp1, 32);
887 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
889 tcg_temp_free_i64(tmp2);
890 tcg_temp_free_i64(tmp1);
892 return;
893 case 0x600e: /* exts.b Rm,Rn */
894 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
895 return;
896 case 0x600f: /* exts.w Rm,Rn */
897 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
898 return;
899 case 0x600c: /* extu.b Rm,Rn */
900 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
901 return;
902 case 0x600d: /* extu.w Rm,Rn */
903 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
904 return;
905 case 0x000f: /* mac.l @Rm+,@Rn+ */
907 TCGv arg0, arg1;
908 arg0 = tcg_temp_new();
909 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
910 arg1 = tcg_temp_new();
911 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
912 gen_helper_macl(arg0, arg1);
913 tcg_temp_free(arg1);
914 tcg_temp_free(arg0);
915 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
916 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
918 return;
919 case 0x400f: /* mac.w @Rm+,@Rn+ */
921 TCGv arg0, arg1;
922 arg0 = tcg_temp_new();
923 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
924 arg1 = tcg_temp_new();
925 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
926 gen_helper_macw(arg0, arg1);
927 tcg_temp_free(arg1);
928 tcg_temp_free(arg0);
929 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
930 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
932 return;
933 case 0x0007: /* mul.l Rm,Rn */
934 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
935 return;
936 case 0x200f: /* muls.w Rm,Rn */
938 TCGv arg0, arg1;
939 arg0 = tcg_temp_new();
940 tcg_gen_ext16s_i32(arg0, REG(B7_4));
941 arg1 = tcg_temp_new();
942 tcg_gen_ext16s_i32(arg1, REG(B11_8));
943 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
944 tcg_temp_free(arg1);
945 tcg_temp_free(arg0);
947 return;
948 case 0x200e: /* mulu.w Rm,Rn */
950 TCGv arg0, arg1;
951 arg0 = tcg_temp_new();
952 tcg_gen_ext16u_i32(arg0, REG(B7_4));
953 arg1 = tcg_temp_new();
954 tcg_gen_ext16u_i32(arg1, REG(B11_8));
955 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
956 tcg_temp_free(arg1);
957 tcg_temp_free(arg0);
959 return;
960 case 0x600b: /* neg Rm,Rn */
961 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
962 return;
963 case 0x600a: /* negc Rm,Rn */
964 gen_helper_negc(REG(B11_8), REG(B7_4));
965 return;
966 case 0x6007: /* not Rm,Rn */
967 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
968 return;
969 case 0x200b: /* or Rm,Rn */
970 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
971 return;
972 case 0x400c: /* shad Rm,Rn */
974 int label1 = gen_new_label();
975 int label2 = gen_new_label();
976 int label3 = gen_new_label();
977 int label4 = gen_new_label();
978 TCGv shift;
979 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
980 /* Rm positive, shift to the left */
981 shift = tcg_temp_new();
982 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
983 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
984 tcg_temp_free(shift);
985 tcg_gen_br(label4);
986 /* Rm negative, shift to the right */
987 gen_set_label(label1);
988 shift = tcg_temp_new();
989 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
990 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
991 tcg_gen_not_i32(shift, REG(B7_4));
992 tcg_gen_andi_i32(shift, shift, 0x1f);
993 tcg_gen_addi_i32(shift, shift, 1);
994 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
995 tcg_temp_free(shift);
996 tcg_gen_br(label4);
997 /* Rm = -32 */
998 gen_set_label(label2);
999 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
1000 tcg_gen_movi_i32(REG(B11_8), 0);
1001 tcg_gen_br(label4);
1002 gen_set_label(label3);
1003 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
1004 gen_set_label(label4);
1006 return;
1007 case 0x400d: /* shld Rm,Rn */
1009 int label1 = gen_new_label();
1010 int label2 = gen_new_label();
1011 int label3 = gen_new_label();
1012 TCGv shift;
1013 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
1014 /* Rm positive, shift to the left */
1015 shift = tcg_temp_new();
1016 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1017 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
1018 tcg_temp_free(shift);
1019 tcg_gen_br(label3);
1020 /* Rm negative, shift to the right */
1021 gen_set_label(label1);
1022 shift = tcg_temp_new();
1023 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1024 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1025 tcg_gen_not_i32(shift, REG(B7_4));
1026 tcg_gen_andi_i32(shift, shift, 0x1f);
1027 tcg_gen_addi_i32(shift, shift, 1);
1028 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1029 tcg_temp_free(shift);
1030 tcg_gen_br(label3);
1031 /* Rm = -32 */
1032 gen_set_label(label2);
1033 tcg_gen_movi_i32(REG(B11_8), 0);
1034 gen_set_label(label3);
1036 return;
1037 case 0x3008: /* sub Rm,Rn */
1038 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1039 return;
1040 case 0x300a: /* subc Rm,Rn */
1041 gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8));
1042 return;
1043 case 0x300b: /* subv Rm,Rn */
1044 gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8));
1045 return;
1046 case 0x2008: /* tst Rm,Rn */
1048 TCGv val = tcg_temp_new();
1049 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1050 gen_cmp_imm(TCG_COND_EQ, val, 0);
1051 tcg_temp_free(val);
1053 return;
1054 case 0x200a: /* xor Rm,Rn */
1055 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1056 return;
1057 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1058 CHECK_FPU_ENABLED
1059 if (ctx->fpscr & FPSCR_SZ) {
1060 TCGv_i64 fp = tcg_temp_new_i64();
1061 gen_load_fpr64(fp, XREG(B7_4));
1062 gen_store_fpr64(fp, XREG(B11_8));
1063 tcg_temp_free_i64(fp);
1064 } else {
1065 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1067 return;
1068 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1069 CHECK_FPU_ENABLED
1070 if (ctx->fpscr & FPSCR_SZ) {
1071 TCGv addr_hi = tcg_temp_new();
1072 int fr = XREG(B7_4);
1073 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1074 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1075 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1076 tcg_temp_free(addr_hi);
1077 } else {
1078 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1080 return;
1081 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1082 CHECK_FPU_ENABLED
1083 if (ctx->fpscr & FPSCR_SZ) {
1084 TCGv addr_hi = tcg_temp_new();
1085 int fr = XREG(B11_8);
1086 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1087 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1088 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1089 tcg_temp_free(addr_hi);
1090 } else {
1091 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1093 return;
1094 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1095 CHECK_FPU_ENABLED
1096 if (ctx->fpscr & FPSCR_SZ) {
1097 TCGv addr_hi = tcg_temp_new();
1098 int fr = XREG(B11_8);
1099 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1100 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1101 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1102 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1103 tcg_temp_free(addr_hi);
1104 } else {
1105 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1106 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1108 return;
1109 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1110 CHECK_FPU_ENABLED
1111 if (ctx->fpscr & FPSCR_SZ) {
1112 TCGv addr = tcg_temp_new_i32();
1113 int fr = XREG(B7_4);
1114 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1115 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1116 tcg_gen_subi_i32(addr, addr, 4);
1117 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1118 tcg_gen_mov_i32(REG(B11_8), addr);
1119 tcg_temp_free(addr);
1120 } else {
1121 TCGv addr;
1122 addr = tcg_temp_new_i32();
1123 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1124 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1125 tcg_gen_mov_i32(REG(B11_8), addr);
1126 tcg_temp_free(addr);
1128 return;
1129 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1130 CHECK_FPU_ENABLED
1132 TCGv addr = tcg_temp_new_i32();
1133 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1134 if (ctx->fpscr & FPSCR_SZ) {
1135 int fr = XREG(B11_8);
1136 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1137 tcg_gen_addi_i32(addr, addr, 4);
1138 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1139 } else {
1140 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1142 tcg_temp_free(addr);
1144 return;
1145 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1146 CHECK_FPU_ENABLED
1148 TCGv addr = tcg_temp_new();
1149 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1150 if (ctx->fpscr & FPSCR_SZ) {
1151 int fr = XREG(B7_4);
1152 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1153 tcg_gen_addi_i32(addr, addr, 4);
1154 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1155 } else {
1156 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1158 tcg_temp_free(addr);
1160 return;
1161 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1162 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1163 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1164 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1165 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1166 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1168 CHECK_FPU_ENABLED
1169 if (ctx->fpscr & FPSCR_PR) {
1170 TCGv_i64 fp0, fp1;
1172 if (ctx->opcode & 0x0110)
1173 break; /* illegal instruction */
1174 fp0 = tcg_temp_new_i64();
1175 fp1 = tcg_temp_new_i64();
1176 gen_load_fpr64(fp0, DREG(B11_8));
1177 gen_load_fpr64(fp1, DREG(B7_4));
1178 switch (ctx->opcode & 0xf00f) {
1179 case 0xf000: /* fadd Rm,Rn */
1180 gen_helper_fadd_DT(fp0, fp0, fp1);
1181 break;
1182 case 0xf001: /* fsub Rm,Rn */
1183 gen_helper_fsub_DT(fp0, fp0, fp1);
1184 break;
1185 case 0xf002: /* fmul Rm,Rn */
1186 gen_helper_fmul_DT(fp0, fp0, fp1);
1187 break;
1188 case 0xf003: /* fdiv Rm,Rn */
1189 gen_helper_fdiv_DT(fp0, fp0, fp1);
1190 break;
1191 case 0xf004: /* fcmp/eq Rm,Rn */
1192 gen_helper_fcmp_eq_DT(fp0, fp1);
1193 return;
1194 case 0xf005: /* fcmp/gt Rm,Rn */
1195 gen_helper_fcmp_gt_DT(fp0, fp1);
1196 return;
1198 gen_store_fpr64(fp0, DREG(B11_8));
1199 tcg_temp_free_i64(fp0);
1200 tcg_temp_free_i64(fp1);
1201 } else {
1202 switch (ctx->opcode & 0xf00f) {
1203 case 0xf000: /* fadd Rm,Rn */
1204 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1205 break;
1206 case 0xf001: /* fsub Rm,Rn */
1207 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1208 break;
1209 case 0xf002: /* fmul Rm,Rn */
1210 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1211 break;
1212 case 0xf003: /* fdiv Rm,Rn */
1213 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1214 break;
1215 case 0xf004: /* fcmp/eq Rm,Rn */
1216 gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1217 return;
1218 case 0xf005: /* fcmp/gt Rm,Rn */
1219 gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1220 return;
1224 return;
1225 case 0xf00e: /* fmac FR0,RM,Rn */
1227 CHECK_FPU_ENABLED
1228 if (ctx->fpscr & FPSCR_PR) {
1229 break; /* illegal instruction */
1230 } else {
1231 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)],
1232 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]);
1233 return;
1238 switch (ctx->opcode & 0xff00) {
1239 case 0xc900: /* and #imm,R0 */
1240 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1241 return;
1242 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1244 TCGv addr, val;
1245 addr = tcg_temp_new();
1246 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1247 val = tcg_temp_new();
1248 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1249 tcg_gen_andi_i32(val, val, B7_0);
1250 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1251 tcg_temp_free(val);
1252 tcg_temp_free(addr);
1254 return;
1255 case 0x8b00: /* bf label */
1256 CHECK_NOT_DELAY_SLOT
1257 gen_conditional_jump(ctx, ctx->pc + 2,
1258 ctx->pc + 4 + B7_0s * 2);
1259 ctx->bstate = BS_BRANCH;
1260 return;
1261 case 0x8f00: /* bf/s label */
1262 CHECK_NOT_DELAY_SLOT
1263 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1264 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1265 return;
1266 case 0x8900: /* bt label */
1267 CHECK_NOT_DELAY_SLOT
1268 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1269 ctx->pc + 2);
1270 ctx->bstate = BS_BRANCH;
1271 return;
1272 case 0x8d00: /* bt/s label */
1273 CHECK_NOT_DELAY_SLOT
1274 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1275 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1276 return;
1277 case 0x8800: /* cmp/eq #imm,R0 */
1278 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1279 return;
1280 case 0xc400: /* mov.b @(disp,GBR),R0 */
1282 TCGv addr = tcg_temp_new();
1283 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1284 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1285 tcg_temp_free(addr);
1287 return;
1288 case 0xc500: /* mov.w @(disp,GBR),R0 */
1290 TCGv addr = tcg_temp_new();
1291 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1292 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1293 tcg_temp_free(addr);
1295 return;
1296 case 0xc600: /* mov.l @(disp,GBR),R0 */
1298 TCGv addr = tcg_temp_new();
1299 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1300 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1301 tcg_temp_free(addr);
1303 return;
1304 case 0xc000: /* mov.b R0,@(disp,GBR) */
1306 TCGv addr = tcg_temp_new();
1307 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1308 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1309 tcg_temp_free(addr);
1311 return;
1312 case 0xc100: /* mov.w R0,@(disp,GBR) */
1314 TCGv addr = tcg_temp_new();
1315 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1316 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1317 tcg_temp_free(addr);
1319 return;
1320 case 0xc200: /* mov.l R0,@(disp,GBR) */
1322 TCGv addr = tcg_temp_new();
1323 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1324 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1325 tcg_temp_free(addr);
1327 return;
1328 case 0x8000: /* mov.b R0,@(disp,Rn) */
1330 TCGv addr = tcg_temp_new();
1331 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1332 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1333 tcg_temp_free(addr);
1335 return;
1336 case 0x8100: /* mov.w R0,@(disp,Rn) */
1338 TCGv addr = tcg_temp_new();
1339 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1340 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1341 tcg_temp_free(addr);
1343 return;
1344 case 0x8400: /* mov.b @(disp,Rn),R0 */
1346 TCGv addr = tcg_temp_new();
1347 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1348 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1349 tcg_temp_free(addr);
1351 return;
1352 case 0x8500: /* mov.w @(disp,Rn),R0 */
1354 TCGv addr = tcg_temp_new();
1355 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1356 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1357 tcg_temp_free(addr);
1359 return;
1360 case 0xc700: /* mova @(disp,PC),R0 */
1361 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1362 return;
1363 case 0xcb00: /* or #imm,R0 */
1364 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1365 return;
1366 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1368 TCGv addr, val;
1369 addr = tcg_temp_new();
1370 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1371 val = tcg_temp_new();
1372 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1373 tcg_gen_ori_i32(val, val, B7_0);
1374 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1375 tcg_temp_free(val);
1376 tcg_temp_free(addr);
1378 return;
1379 case 0xc300: /* trapa #imm */
1381 TCGv imm;
1382 CHECK_NOT_DELAY_SLOT
1383 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1384 imm = tcg_const_i32(B7_0);
1385 gen_helper_trapa(imm);
1386 tcg_temp_free(imm);
1387 ctx->bstate = BS_BRANCH;
1389 return;
1390 case 0xc800: /* tst #imm,R0 */
1392 TCGv val = tcg_temp_new();
1393 tcg_gen_andi_i32(val, REG(0), B7_0);
1394 gen_cmp_imm(TCG_COND_EQ, val, 0);
1395 tcg_temp_free(val);
1397 return;
1398 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1400 TCGv val = tcg_temp_new();
1401 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1402 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1403 tcg_gen_andi_i32(val, val, B7_0);
1404 gen_cmp_imm(TCG_COND_EQ, val, 0);
1405 tcg_temp_free(val);
1407 return;
1408 case 0xca00: /* xor #imm,R0 */
1409 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1410 return;
1411 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1413 TCGv addr, val;
1414 addr = tcg_temp_new();
1415 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1416 val = tcg_temp_new();
1417 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1418 tcg_gen_xori_i32(val, val, B7_0);
1419 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1420 tcg_temp_free(val);
1421 tcg_temp_free(addr);
1423 return;
1426 switch (ctx->opcode & 0xf08f) {
1427 case 0x408e: /* ldc Rm,Rn_BANK */
1428 CHECK_PRIVILEGED
1429 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1430 return;
1431 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1432 CHECK_PRIVILEGED
1433 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1434 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1435 return;
1436 case 0x0082: /* stc Rm_BANK,Rn */
1437 CHECK_PRIVILEGED
1438 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1439 return;
1440 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1441 CHECK_PRIVILEGED
1443 TCGv addr = tcg_temp_new();
1444 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1445 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1446 tcg_gen_mov_i32(REG(B11_8), addr);
1447 tcg_temp_free(addr);
1449 return;
1452 switch (ctx->opcode & 0xf0ff) {
1453 case 0x0023: /* braf Rn */
1454 CHECK_NOT_DELAY_SLOT
1455 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1456 ctx->flags |= DELAY_SLOT;
1457 ctx->delayed_pc = (uint32_t) - 1;
1458 return;
1459 case 0x0003: /* bsrf Rn */
1460 CHECK_NOT_DELAY_SLOT
1461 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1462 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1463 ctx->flags |= DELAY_SLOT;
1464 ctx->delayed_pc = (uint32_t) - 1;
1465 return;
1466 case 0x4015: /* cmp/pl Rn */
1467 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1468 return;
1469 case 0x4011: /* cmp/pz Rn */
1470 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1471 return;
1472 case 0x4010: /* dt Rn */
1473 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1474 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1475 return;
1476 case 0x402b: /* jmp @Rn */
1477 CHECK_NOT_DELAY_SLOT
1478 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1479 ctx->flags |= DELAY_SLOT;
1480 ctx->delayed_pc = (uint32_t) - 1;
1481 return;
1482 case 0x400b: /* jsr @Rn */
1483 CHECK_NOT_DELAY_SLOT
1484 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1485 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1486 ctx->flags |= DELAY_SLOT;
1487 ctx->delayed_pc = (uint32_t) - 1;
1488 return;
1489 case 0x400e: /* ldc Rm,SR */
1490 CHECK_PRIVILEGED
1491 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1492 ctx->bstate = BS_STOP;
1493 return;
1494 case 0x4007: /* ldc.l @Rm+,SR */
1495 CHECK_PRIVILEGED
1497 TCGv val = tcg_temp_new();
1498 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1499 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1500 tcg_temp_free(val);
1501 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1502 ctx->bstate = BS_STOP;
1504 return;
1505 case 0x0002: /* stc SR,Rn */
1506 CHECK_PRIVILEGED
1507 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1508 return;
1509 case 0x4003: /* stc SR,@-Rn */
1510 CHECK_PRIVILEGED
1512 TCGv addr = tcg_temp_new();
1513 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1514 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1515 tcg_gen_mov_i32(REG(B11_8), addr);
1516 tcg_temp_free(addr);
1518 return;
1519 #define LD(reg,ldnum,ldpnum,prechk) \
1520 case ldnum: \
1521 prechk \
1522 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1523 return; \
1524 case ldpnum: \
1525 prechk \
1526 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1527 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1528 return;
1529 #define ST(reg,stnum,stpnum,prechk) \
1530 case stnum: \
1531 prechk \
1532 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1533 return; \
1534 case stpnum: \
1535 prechk \
1537 TCGv addr = tcg_temp_new(); \
1538 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1539 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1540 tcg_gen_mov_i32(REG(B11_8), addr); \
1541 tcg_temp_free(addr); \
1543 return;
1544 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1545 LD(reg,ldnum,ldpnum,prechk) \
1546 ST(reg,stnum,stpnum,prechk)
1547 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1548 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1549 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1550 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1551 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1552 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1553 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1554 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1555 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1556 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1557 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1558 case 0x406a: /* lds Rm,FPSCR */
1559 CHECK_FPU_ENABLED
1560 gen_helper_ld_fpscr(REG(B11_8));
1561 ctx->bstate = BS_STOP;
1562 return;
1563 case 0x4066: /* lds.l @Rm+,FPSCR */
1564 CHECK_FPU_ENABLED
1566 TCGv addr = tcg_temp_new();
1567 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1568 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1569 gen_helper_ld_fpscr(addr);
1570 tcg_temp_free(addr);
1571 ctx->bstate = BS_STOP;
1573 return;
1574 case 0x006a: /* sts FPSCR,Rn */
1575 CHECK_FPU_ENABLED
1576 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1577 return;
1578 case 0x4062: /* sts FPSCR,@-Rn */
1579 CHECK_FPU_ENABLED
1581 TCGv addr, val;
1582 val = tcg_temp_new();
1583 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1584 addr = tcg_temp_new();
1585 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1586 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1587 tcg_gen_mov_i32(REG(B11_8), addr);
1588 tcg_temp_free(addr);
1589 tcg_temp_free(val);
1591 return;
1592 case 0x00c3: /* movca.l R0,@Rm */
1594 TCGv val = tcg_temp_new();
1595 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1596 gen_helper_movcal (REG(B11_8), val);
1597 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1599 ctx->has_movcal = 1;
1600 return;
1601 case 0x40a9:
1602 /* MOVUA.L @Rm,R0 (Rm) -> R0
1603 Load non-boundary-aligned data */
1604 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1605 return;
1606 case 0x40e9:
1607 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1608 Load non-boundary-aligned data */
1609 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1610 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1611 return;
1612 case 0x0029: /* movt Rn */
1613 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1614 return;
1615 case 0x0073:
1616 /* MOVCO.L
1617 LDST -> T
1618 If (T == 1) R0 -> (Rn)
1619 0 -> LDST
1621 if (ctx->features & SH_FEATURE_SH4A) {
1622 int label = gen_new_label();
1623 gen_clr_t();
1624 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1625 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1626 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1627 gen_set_label(label);
1628 tcg_gen_movi_i32(cpu_ldst, 0);
1629 return;
1630 } else
1631 break;
1632 case 0x0063:
1633 /* MOVLI.L @Rm,R0
1634 1 -> LDST
1635 (Rm) -> R0
1636 When interrupt/exception
1637 occurred 0 -> LDST
1639 if (ctx->features & SH_FEATURE_SH4A) {
1640 tcg_gen_movi_i32(cpu_ldst, 0);
1641 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1642 tcg_gen_movi_i32(cpu_ldst, 1);
1643 return;
1644 } else
1645 break;
1646 case 0x0093: /* ocbi @Rn */
1648 gen_helper_ocbi (REG(B11_8));
1650 return;
1651 case 0x00a3: /* ocbp @Rn */
1653 TCGv dummy = tcg_temp_new();
1654 tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
1655 tcg_temp_free(dummy);
1657 return;
1658 case 0x00b3: /* ocbwb @Rn */
1660 TCGv dummy = tcg_temp_new();
1661 tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
1662 tcg_temp_free(dummy);
1664 return;
1665 case 0x0083: /* pref @Rn */
1666 return;
1667 case 0x00d3: /* prefi @Rn */
1668 if (ctx->features & SH_FEATURE_SH4A)
1669 return;
1670 else
1671 break;
1672 case 0x00e3: /* icbi @Rn */
1673 if (ctx->features & SH_FEATURE_SH4A)
1674 return;
1675 else
1676 break;
1677 case 0x00ab: /* synco */
1678 if (ctx->features & SH_FEATURE_SH4A)
1679 return;
1680 else
1681 break;
1682 case 0x4024: /* rotcl Rn */
1684 TCGv tmp = tcg_temp_new();
1685 tcg_gen_mov_i32(tmp, cpu_sr);
1686 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1687 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1688 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1689 tcg_temp_free(tmp);
1691 return;
1692 case 0x4025: /* rotcr Rn */
1694 TCGv tmp = tcg_temp_new();
1695 tcg_gen_mov_i32(tmp, cpu_sr);
1696 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1697 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1698 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1699 tcg_temp_free(tmp);
1701 return;
1702 case 0x4004: /* rotl Rn */
1703 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1704 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1705 gen_copy_bit_i32(REG(B11_8), 0, cpu_sr, 0);
1706 return;
1707 case 0x4005: /* rotr Rn */
1708 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1709 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1710 gen_copy_bit_i32(REG(B11_8), 31, cpu_sr, 0);
1711 return;
1712 case 0x4000: /* shll Rn */
1713 case 0x4020: /* shal Rn */
1714 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1715 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1716 return;
1717 case 0x4021: /* shar Rn */
1718 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1719 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1720 return;
1721 case 0x4001: /* shlr Rn */
1722 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1723 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1724 return;
1725 case 0x4008: /* shll2 Rn */
1726 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1727 return;
1728 case 0x4018: /* shll8 Rn */
1729 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1730 return;
1731 case 0x4028: /* shll16 Rn */
1732 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1733 return;
1734 case 0x4009: /* shlr2 Rn */
1735 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1736 return;
1737 case 0x4019: /* shlr8 Rn */
1738 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1739 return;
1740 case 0x4029: /* shlr16 Rn */
1741 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1742 return;
1743 case 0x401b: /* tas.b @Rn */
1745 TCGv addr, val;
1746 addr = tcg_temp_local_new();
1747 tcg_gen_mov_i32(addr, REG(B11_8));
1748 val = tcg_temp_local_new();
1749 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1750 gen_cmp_imm(TCG_COND_EQ, val, 0);
1751 tcg_gen_ori_i32(val, val, 0x80);
1752 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1753 tcg_temp_free(val);
1754 tcg_temp_free(addr);
1756 return;
1757 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1758 CHECK_FPU_ENABLED
1759 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1760 return;
1761 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1762 CHECK_FPU_ENABLED
1763 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1764 return;
1765 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1766 CHECK_FPU_ENABLED
1767 if (ctx->fpscr & FPSCR_PR) {
1768 TCGv_i64 fp;
1769 if (ctx->opcode & 0x0100)
1770 break; /* illegal instruction */
1771 fp = tcg_temp_new_i64();
1772 gen_helper_float_DT(fp, cpu_fpul);
1773 gen_store_fpr64(fp, DREG(B11_8));
1774 tcg_temp_free_i64(fp);
1776 else {
1777 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul);
1779 return;
1780 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1781 CHECK_FPU_ENABLED
1782 if (ctx->fpscr & FPSCR_PR) {
1783 TCGv_i64 fp;
1784 if (ctx->opcode & 0x0100)
1785 break; /* illegal instruction */
1786 fp = tcg_temp_new_i64();
1787 gen_load_fpr64(fp, DREG(B11_8));
1788 gen_helper_ftrc_DT(cpu_fpul, fp);
1789 tcg_temp_free_i64(fp);
1791 else {
1792 gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1794 return;
1795 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1796 CHECK_FPU_ENABLED
1798 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1800 return;
1801 case 0xf05d: /* fabs FRn/DRn */
1802 CHECK_FPU_ENABLED
1803 if (ctx->fpscr & FPSCR_PR) {
1804 if (ctx->opcode & 0x0100)
1805 break; /* illegal instruction */
1806 TCGv_i64 fp = tcg_temp_new_i64();
1807 gen_load_fpr64(fp, DREG(B11_8));
1808 gen_helper_fabs_DT(fp, fp);
1809 gen_store_fpr64(fp, DREG(B11_8));
1810 tcg_temp_free_i64(fp);
1811 } else {
1812 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1814 return;
1815 case 0xf06d: /* fsqrt FRn */
1816 CHECK_FPU_ENABLED
1817 if (ctx->fpscr & FPSCR_PR) {
1818 if (ctx->opcode & 0x0100)
1819 break; /* illegal instruction */
1820 TCGv_i64 fp = tcg_temp_new_i64();
1821 gen_load_fpr64(fp, DREG(B11_8));
1822 gen_helper_fsqrt_DT(fp, fp);
1823 gen_store_fpr64(fp, DREG(B11_8));
1824 tcg_temp_free_i64(fp);
1825 } else {
1826 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1828 return;
1829 case 0xf07d: /* fsrra FRn */
1830 CHECK_FPU_ENABLED
1831 break;
1832 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1833 CHECK_FPU_ENABLED
1834 if (!(ctx->fpscr & FPSCR_PR)) {
1835 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1837 return;
1838 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1839 CHECK_FPU_ENABLED
1840 if (!(ctx->fpscr & FPSCR_PR)) {
1841 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1843 return;
1844 case 0xf0ad: /* fcnvsd FPUL,DRn */
1845 CHECK_FPU_ENABLED
1847 TCGv_i64 fp = tcg_temp_new_i64();
1848 gen_helper_fcnvsd_FT_DT(fp, cpu_fpul);
1849 gen_store_fpr64(fp, DREG(B11_8));
1850 tcg_temp_free_i64(fp);
1852 return;
1853 case 0xf0bd: /* fcnvds DRn,FPUL */
1854 CHECK_FPU_ENABLED
1856 TCGv_i64 fp = tcg_temp_new_i64();
1857 gen_load_fpr64(fp, DREG(B11_8));
1858 gen_helper_fcnvds_DT_FT(cpu_fpul, fp);
1859 tcg_temp_free_i64(fp);
1861 return;
1862 case 0xf0ed: /* fipr FVm,FVn */
1863 CHECK_FPU_ENABLED
1864 if ((ctx->fpscr & FPSCR_PR) == 0) {
1865 TCGv m, n;
1866 m = tcg_const_i32((ctx->opcode >> 16) & 3);
1867 n = tcg_const_i32((ctx->opcode >> 18) & 3);
1868 gen_helper_fipr(m, n);
1869 tcg_temp_free(m);
1870 tcg_temp_free(n);
1871 return;
1873 break;
1875 #if 0
1876 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1877 ctx->opcode, ctx->pc);
1878 fflush(stderr);
1879 #endif
1880 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1881 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1882 gen_helper_raise_slot_illegal_instruction();
1883 } else {
1884 gen_helper_raise_illegal_instruction();
1886 ctx->bstate = BS_EXCP;
1889 static void decode_opc(DisasContext * ctx)
1891 uint32_t old_flags = ctx->flags;
1893 _decode_opc(ctx);
1895 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1896 if (ctx->flags & DELAY_SLOT_CLEARME) {
1897 gen_store_flags(0);
1898 } else {
1899 /* go out of the delay slot */
1900 uint32_t new_flags = ctx->flags;
1901 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1902 gen_store_flags(new_flags);
1904 ctx->flags = 0;
1905 ctx->bstate = BS_BRANCH;
1906 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1907 gen_delayed_conditional_jump(ctx);
1908 } else if (old_flags & DELAY_SLOT) {
1909 gen_jump(ctx);
1914 /* go into a delay slot */
1915 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1916 gen_store_flags(ctx->flags);
1919 static inline void
1920 gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
1921 int search_pc)
1923 DisasContext ctx;
1924 target_ulong pc_start;
1925 static uint16_t *gen_opc_end;
1926 CPUBreakpoint *bp;
1927 int i, ii;
1928 int num_insns;
1929 int max_insns;
1931 pc_start = tb->pc;
1932 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1933 ctx.pc = pc_start;
1934 ctx.flags = (uint32_t)tb->flags;
1935 ctx.bstate = BS_NONE;
1936 ctx.sr = env->sr;
1937 ctx.fpscr = env->fpscr;
1938 ctx.memidx = (env->sr & SR_MD) == 0 ? 1 : 0;
1939 /* We don't know if the delayed pc came from a dynamic or static branch,
1940 so assume it is a dynamic branch. */
1941 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1942 ctx.tb = tb;
1943 ctx.singlestep_enabled = env->singlestep_enabled;
1944 ctx.features = env->features;
1945 ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);
1947 ii = -1;
1948 num_insns = 0;
1949 max_insns = tb->cflags & CF_COUNT_MASK;
1950 if (max_insns == 0)
1951 max_insns = CF_COUNT_MASK;
1952 gen_icount_start();
1953 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1954 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1955 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1956 if (ctx.pc == bp->pc) {
1957 /* We have hit a breakpoint - make sure PC is up-to-date */
1958 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1959 gen_helper_debug();
1960 ctx.bstate = BS_EXCP;
1961 break;
1965 if (search_pc) {
1966 i = gen_opc_ptr - gen_opc_buf;
1967 if (ii < i) {
1968 ii++;
1969 while (ii < i)
1970 gen_opc_instr_start[ii++] = 0;
1972 gen_opc_pc[ii] = ctx.pc;
1973 gen_opc_hflags[ii] = ctx.flags;
1974 gen_opc_instr_start[ii] = 1;
1975 gen_opc_icount[ii] = num_insns;
1977 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1978 gen_io_start();
1979 #if 0
1980 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1981 fflush(stderr);
1982 #endif
1983 ctx.opcode = lduw_code(ctx.pc);
1984 decode_opc(&ctx);
1985 num_insns++;
1986 ctx.pc += 2;
1987 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1988 break;
1989 if (env->singlestep_enabled)
1990 break;
1991 if (num_insns >= max_insns)
1992 break;
1993 if (singlestep)
1994 break;
1996 if (tb->cflags & CF_LAST_IO)
1997 gen_io_end();
1998 if (env->singlestep_enabled) {
1999 tcg_gen_movi_i32(cpu_pc, ctx.pc);
2000 gen_helper_debug();
2001 } else {
2002 switch (ctx.bstate) {
2003 case BS_STOP:
2004 /* gen_op_interrupt_restart(); */
2005 /* fall through */
2006 case BS_NONE:
2007 if (ctx.flags) {
2008 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
2010 gen_goto_tb(&ctx, 0, ctx.pc);
2011 break;
2012 case BS_EXCP:
2013 /* gen_op_interrupt_restart(); */
2014 tcg_gen_exit_tb(0);
2015 break;
2016 case BS_BRANCH:
2017 default:
2018 break;
2022 gen_icount_end(tb, num_insns);
2023 *gen_opc_ptr = INDEX_op_end;
2024 if (search_pc) {
2025 i = gen_opc_ptr - gen_opc_buf;
2026 ii++;
2027 while (ii <= i)
2028 gen_opc_instr_start[ii++] = 0;
2029 } else {
2030 tb->size = ctx.pc - pc_start;
2031 tb->icount = num_insns;
2034 #ifdef DEBUG_DISAS
2035 #ifdef SH4_DEBUG_DISAS
2036 qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
2037 #endif
2038 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2039 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2040 log_target_disas(pc_start, ctx.pc - pc_start, 0);
2041 qemu_log("\n");
2043 #endif
2046 void gen_intermediate_code(CPUState * env, struct TranslationBlock *tb)
2048 gen_intermediate_code_internal(env, tb, 0);
2051 void gen_intermediate_code_pc(CPUState * env, struct TranslationBlock *tb)
2053 gen_intermediate_code_internal(env, tb, 1);
2056 void gen_pc_load(CPUState *env, TranslationBlock *tb,
2057 unsigned long searched_pc, int pc_pos, void *puc)
2059 env->pc = gen_opc_pc[pc_pos];
2060 env->flags = gen_opc_hflags[pc_pos];