target-sh4: define FPSCR constants
[qemu.git] / target-sh4 / translate.c
blobd4cd0a3f865d5527a2a8a2ebf6a54a06271dfb5a
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
25 #define DEBUG_DISAS
26 #define SH4_DEBUG_DISAS
27 //#define SH4_SINGLE_STEP
29 #include "cpu.h"
30 #include "exec-all.h"
31 #include "disas.h"
32 #include "tcg-op.h"
33 #include "qemu-common.h"
35 #include "helper.h"
36 #define GEN_HELPER 1
37 #include "helper.h"
39 typedef struct DisasContext {
40 struct TranslationBlock *tb;
41 target_ulong pc;
42 uint32_t sr;
43 uint32_t fpscr;
44 uint16_t opcode;
45 uint32_t flags;
46 int bstate;
47 int memidx;
48 uint32_t delayed_pc;
49 int singlestep_enabled;
50 uint32_t features;
51 int has_movcal;
52 } DisasContext;
/* True when the code being translated runs in user mode: always the
   case in a user-only build, otherwise determined by the MD
   (privileged-mode) bit of SR captured in the context. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->sr & SR_MD))
#endif
/* Values for DisasContext::bstate, describing why translation of the
   current block terminates. */
enum {
    BS_NONE = 0,   /* fell off the end of the TB without reaching a
                      branch or an exception condition */
    BS_STOP = 1,   /* we want to stop translation for any reason */
    BS_BRANCH = 2, /* we reached a branch condition */
    BS_EXCP = 3,   /* we reached an exception condition */
};
69 /* global register indexes */
70 static TCGv_ptr cpu_env;
71 static TCGv cpu_gregs[24];
72 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
73 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
74 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
75 static TCGv cpu_fregs[32];
77 /* internal register indexes */
78 static TCGv cpu_flags, cpu_delayed_pc;
80 static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
82 #include "gen-icount.h"
84 static void sh4_translate_init(void)
86 int i;
87 static int done_init = 0;
88 static const char * const gregnames[24] = {
89 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
90 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
91 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
92 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
93 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
95 static const char * const fregnames[32] = {
96 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
97 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
98 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
99 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
100 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
101 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
102 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
103 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
106 if (done_init)
107 return;
109 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
111 for (i = 0; i < 24; i++)
112 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
113 offsetof(CPUState, gregs[i]),
114 gregnames[i]);
116 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUState, pc), "PC");
118 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUState, sr), "SR");
120 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUState, ssr), "SSR");
122 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUState, spc), "SPC");
124 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUState, gbr), "GBR");
126 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUState, vbr), "VBR");
128 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
129 offsetof(CPUState, sgr), "SGR");
130 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
131 offsetof(CPUState, dbr), "DBR");
132 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
133 offsetof(CPUState, mach), "MACH");
134 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
135 offsetof(CPUState, macl), "MACL");
136 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
137 offsetof(CPUState, pr), "PR");
138 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
139 offsetof(CPUState, fpscr), "FPSCR");
140 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
141 offsetof(CPUState, fpul), "FPUL");
143 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
144 offsetof(CPUState, flags), "_flags_");
145 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
146 offsetof(CPUState, delayed_pc),
147 "_delayed_pc_");
148 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
149 offsetof(CPUState, ldst), "_ldst_");
151 for (i = 0; i < 32; i++)
152 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
153 offsetof(CPUState, fregs[i]),
154 fregnames[i]);
156 /* register helpers */
157 #define GEN_HELPER 2
158 #include "helper.h"
160 done_init = 1;
163 void cpu_dump_state(CPUState * env, FILE * f,
164 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
165 int flags)
167 int i;
168 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
169 env->pc, env->sr, env->pr, env->fpscr);
170 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
171 env->spc, env->ssr, env->gbr, env->vbr);
172 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
173 env->sgr, env->dbr, env->delayed_pc, env->fpul);
174 for (i = 0; i < 24; i += 4) {
175 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
176 i, env->gregs[i], i + 1, env->gregs[i + 1],
177 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
179 if (env->flags & DELAY_SLOT) {
180 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
181 env->delayed_pc);
182 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
183 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
184 env->delayed_pc);
188 static void cpu_sh4_reset(CPUSH4State * env)
190 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
191 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
192 log_cpu_state(env, 0);
195 #if defined(CONFIG_USER_ONLY)
196 env->sr = 0;
197 #else
198 env->sr = SR_MD | SR_RB | SR_BL | SR_I3 | SR_I2 | SR_I1 | SR_I0;
199 #endif
200 env->vbr = 0;
201 env->pc = 0xA0000000;
202 #if defined(CONFIG_USER_ONLY)
203 env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
204 set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
205 #else
206 env->fpscr = FPSCR_DN | FPSCR_RM_ZERO; /* CPU reset value according to SH4 manual */
207 set_float_rounding_mode(float_round_to_zero, &env->fp_status);
208 #endif
209 set_default_nan_mode(1, &env->fp_status);
210 env->mmucr = 0;
213 typedef struct {
214 const char *name;
215 int id;
216 uint32_t pvr;
217 uint32_t prr;
218 uint32_t cvr;
219 uint32_t features;
220 } sh4_def_t;
222 static sh4_def_t sh4_defs[] = {
224 .name = "SH7750R",
225 .id = SH_CPU_SH7750R,
226 .pvr = 0x00050000,
227 .prr = 0x00000100,
228 .cvr = 0x00110000,
229 .features = SH_FEATURE_BCR3_AND_BCR4,
230 }, {
231 .name = "SH7751R",
232 .id = SH_CPU_SH7751R,
233 .pvr = 0x04050005,
234 .prr = 0x00000113,
235 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
236 .features = SH_FEATURE_BCR3_AND_BCR4,
237 }, {
238 .name = "SH7785",
239 .id = SH_CPU_SH7785,
240 .pvr = 0x10300700,
241 .prr = 0x00000200,
242 .cvr = 0x71440211,
243 .features = SH_FEATURE_SH4A,
247 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
249 int i;
251 if (strcasecmp(name, "any") == 0)
252 return &sh4_defs[0];
254 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
255 if (strcasecmp(name, sh4_defs[i].name) == 0)
256 return &sh4_defs[i];
258 return NULL;
261 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
263 int i;
265 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
266 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
269 static void cpu_sh4_register(CPUSH4State *env, const sh4_def_t *def)
271 env->pvr = def->pvr;
272 env->prr = def->prr;
273 env->cvr = def->cvr;
274 env->id = def->id;
277 CPUSH4State *cpu_sh4_init(const char *cpu_model)
279 CPUSH4State *env;
280 const sh4_def_t *def;
282 def = cpu_sh4_find_by_name(cpu_model);
283 if (!def)
284 return NULL;
285 env = qemu_mallocz(sizeof(CPUSH4State));
286 env->features = def->features;
287 cpu_exec_init(env);
288 env->movcal_backup_tail = &(env->movcal_backup);
289 sh4_translate_init();
290 env->cpu_model_str = cpu_model;
291 cpu_sh4_reset(env);
292 cpu_sh4_register(env, def);
293 tlb_flush(env, 1);
294 qemu_init_vcpu(env);
295 return env;
298 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
300 TranslationBlock *tb;
301 tb = ctx->tb;
303 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
304 !ctx->singlestep_enabled) {
305 /* Use a direct jump if in same page and singlestep not enabled */
306 tcg_gen_goto_tb(n);
307 tcg_gen_movi_i32(cpu_pc, dest);
308 tcg_gen_exit_tb((long) tb + n);
309 } else {
310 tcg_gen_movi_i32(cpu_pc, dest);
311 if (ctx->singlestep_enabled)
312 gen_helper_debug();
313 tcg_gen_exit_tb(0);
317 static void gen_jump(DisasContext * ctx)
319 if (ctx->delayed_pc == (uint32_t) - 1) {
320 /* Target is not statically known, it comes necessarily from a
321 delayed jump as immediate jump are conditinal jumps */
322 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
323 if (ctx->singlestep_enabled)
324 gen_helper_debug();
325 tcg_gen_exit_tb(0);
326 } else {
327 gen_goto_tb(ctx, 0, ctx->delayed_pc);
331 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
333 TCGv sr;
334 int label = gen_new_label();
335 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
336 sr = tcg_temp_new();
337 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
338 tcg_gen_brcondi_i32(TCG_COND_NE, sr, t ? SR_T : 0, label);
339 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
340 gen_set_label(label);
343 /* Immediate conditional jump (bt or bf) */
344 static void gen_conditional_jump(DisasContext * ctx,
345 target_ulong ift, target_ulong ifnott)
347 int l1;
348 TCGv sr;
350 l1 = gen_new_label();
351 sr = tcg_temp_new();
352 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
353 tcg_gen_brcondi_i32(TCG_COND_EQ, sr, SR_T, l1);
354 gen_goto_tb(ctx, 0, ifnott);
355 gen_set_label(l1);
356 gen_goto_tb(ctx, 1, ift);
359 /* Delayed conditional jump (bt or bf) */
360 static void gen_delayed_conditional_jump(DisasContext * ctx)
362 int l1;
363 TCGv ds;
365 l1 = gen_new_label();
366 ds = tcg_temp_new();
367 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
368 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, DELAY_SLOT_TRUE, l1);
369 gen_goto_tb(ctx, 1, ctx->pc + 2);
370 gen_set_label(l1);
371 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
372 gen_jump(ctx);
375 static inline void gen_set_t(void)
377 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
380 static inline void gen_clr_t(void)
382 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
385 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
387 int label1 = gen_new_label();
388 int label2 = gen_new_label();
389 tcg_gen_brcond_i32(cond, t1, t0, label1);
390 gen_clr_t();
391 tcg_gen_br(label2);
392 gen_set_label(label1);
393 gen_set_t();
394 gen_set_label(label2);
397 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
399 int label1 = gen_new_label();
400 int label2 = gen_new_label();
401 tcg_gen_brcondi_i32(cond, t0, imm, label1);
402 gen_clr_t();
403 tcg_gen_br(label2);
404 gen_set_label(label1);
405 gen_set_t();
406 gen_set_label(label2);
409 static inline void gen_store_flags(uint32_t flags)
411 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
412 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
415 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
417 TCGv tmp = tcg_temp_new();
419 p0 &= 0x1f;
420 p1 &= 0x1f;
422 tcg_gen_andi_i32(tmp, t1, (1 << p1));
423 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
424 if (p0 < p1)
425 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
426 else if (p0 > p1)
427 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
428 tcg_gen_or_i32(t0, t0, tmp);
430 tcg_temp_free(tmp);
433 static inline void gen_load_fpr64(TCGv_i64 t, int reg)
435 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
438 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
440 TCGv_i32 tmp = tcg_temp_new_i32();
441 tcg_gen_trunc_i64_i32(tmp, t);
442 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
443 tcg_gen_shri_i64(t, t, 32);
444 tcg_gen_trunc_i64_i32(tmp, t);
445 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
446 tcg_temp_free_i32(tmp);
/* Instruction-field extraction macros; all operate on ctx->opcode. */
#define B3_0 (ctx->opcode & 0xf)              /* bits 3..0 */
#define B6_4 ((ctx->opcode >> 4) & 0x7)       /* bits 6..4 */
#define B7_4 ((ctx->opcode >> 4) & 0xf)       /* bits 7..4 */
#define B7_0 (ctx->opcode & 0xff)             /* bits 7..0, unsigned */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff)) /* sign-extended */
/* bits 11..0, sign-extended from bit 11 */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)      /* bits 11..8 */
#define B15_12 ((ctx->opcode >> 12) & 0xf)    /* bits 15..12 */

/* R0..R7 are banked: REG() selects the bank implied by SR.MD/SR.RB at
   translation time, ALTREG() the opposite bank. */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
  (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
  ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection honouring the FPSCR.FR bank bit; XHACK remaps
   the double/vector register encoding onto the flat cpu_fregs index. */
#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Abort decoding of an instruction that is illegal inside a delay
   slot: raise the slot-illegal-instruction exception and end the TB. */
#define CHECK_NOT_DELAY_SLOT                                     \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {      \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                         \
      gen_helper_raise_slot_illegal_instruction();               \
      ctx->bstate = BS_EXCP;                                     \
      return;                                                    \
  }
/* Abort decoding of a privileged instruction executed in user mode,
   raising the appropriate (slot-)illegal-instruction exception. */
#define CHECK_PRIVILEGED                                         \
  if (IS_USER(ctx)) {                                            \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                         \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {  \
          gen_helper_raise_slot_illegal_instruction();           \
      } else {                                                   \
          gen_helper_raise_illegal_instruction();                \
      }                                                          \
      ctx->bstate = BS_EXCP;                                     \
      return;                                                    \
  }
/* Abort decoding of an FP instruction when the FPU is disabled
   (SR.FD set), raising the appropriate FPU-disable exception. */
#define CHECK_FPU_ENABLED                                        \
  if (ctx->flags & SR_FD) {                                      \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                         \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {  \
          gen_helper_raise_slot_fpu_disable();                   \
      } else {                                                   \
          gen_helper_raise_fpu_disable();                        \
      }                                                          \
      ctx->bstate = BS_EXCP;                                     \
      return;                                                    \
  }
503 static void _decode_opc(DisasContext * ctx)
505 /* This code tries to make movcal emulation sufficiently
506 accurate for Linux purposes. This instruction writes
507 memory, and prior to that, always allocates a cache line.
508 It is used in two contexts:
509 - in memcpy, where data is copied in blocks, the first write
510 of to a block uses movca.l for performance.
511 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
512 to flush the cache. Here, the data written by movcal.l is never
513 written to memory, and the data written is just bogus.
515 To simulate this, we simulate movcal.l, we store the value to memory,
516 but we also remember the previous content. If we see ocbi, we check
517 if movcal.l for that address was done previously. If so, the write should
518 not have hit the memory, so we restore the previous content.
519 When we see an instruction that is neither movca.l
520 nor ocbi, the previous content is discarded.
522 To optimize, we only try to flush stores when we're at the start of
523 TB, or if we already saw movca.l in this TB and did not flush stores
524 yet. */
525 if (ctx->has_movcal)
527 int opcode = ctx->opcode & 0xf0ff;
528 if (opcode != 0x0093 /* ocbi */
529 && opcode != 0x00c3 /* movca.l */)
531 gen_helper_discard_movcal_backup ();
532 ctx->has_movcal = 0;
536 #if 0
537 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
538 #endif
540 switch (ctx->opcode) {
541 case 0x0019: /* div0u */
542 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
543 return;
544 case 0x000b: /* rts */
545 CHECK_NOT_DELAY_SLOT
546 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
547 ctx->flags |= DELAY_SLOT;
548 ctx->delayed_pc = (uint32_t) - 1;
549 return;
550 case 0x0028: /* clrmac */
551 tcg_gen_movi_i32(cpu_mach, 0);
552 tcg_gen_movi_i32(cpu_macl, 0);
553 return;
554 case 0x0048: /* clrs */
555 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
556 return;
557 case 0x0008: /* clrt */
558 gen_clr_t();
559 return;
560 case 0x0038: /* ldtlb */
561 CHECK_PRIVILEGED
562 gen_helper_ldtlb();
563 return;
564 case 0x002b: /* rte */
565 CHECK_PRIVILEGED
566 CHECK_NOT_DELAY_SLOT
567 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
568 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
569 ctx->flags |= DELAY_SLOT;
570 ctx->delayed_pc = (uint32_t) - 1;
571 return;
572 case 0x0058: /* sets */
573 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
574 return;
575 case 0x0018: /* sett */
576 gen_set_t();
577 return;
578 case 0xfbfd: /* frchg */
579 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
580 ctx->bstate = BS_STOP;
581 return;
582 case 0xf3fd: /* fschg */
583 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
584 ctx->bstate = BS_STOP;
585 return;
586 case 0x0009: /* nop */
587 return;
588 case 0x001b: /* sleep */
589 CHECK_PRIVILEGED
590 gen_helper_sleep(tcg_const_i32(ctx->pc + 2));
591 return;
594 switch (ctx->opcode & 0xf000) {
595 case 0x1000: /* mov.l Rm,@(disp,Rn) */
597 TCGv addr = tcg_temp_new();
598 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
599 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
600 tcg_temp_free(addr);
602 return;
603 case 0x5000: /* mov.l @(disp,Rm),Rn */
605 TCGv addr = tcg_temp_new();
606 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
607 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
608 tcg_temp_free(addr);
610 return;
611 case 0xe000: /* mov #imm,Rn */
612 tcg_gen_movi_i32(REG(B11_8), B7_0s);
613 return;
614 case 0x9000: /* mov.w @(disp,PC),Rn */
616 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
617 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
618 tcg_temp_free(addr);
620 return;
621 case 0xd000: /* mov.l @(disp,PC),Rn */
623 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
624 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
625 tcg_temp_free(addr);
627 return;
628 case 0x7000: /* add #imm,Rn */
629 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
630 return;
631 case 0xa000: /* bra disp */
632 CHECK_NOT_DELAY_SLOT
633 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
634 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
635 ctx->flags |= DELAY_SLOT;
636 return;
637 case 0xb000: /* bsr disp */
638 CHECK_NOT_DELAY_SLOT
639 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
640 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
641 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
642 ctx->flags |= DELAY_SLOT;
643 return;
646 switch (ctx->opcode & 0xf00f) {
647 case 0x6003: /* mov Rm,Rn */
648 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
649 return;
650 case 0x2000: /* mov.b Rm,@Rn */
651 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
652 return;
653 case 0x2001: /* mov.w Rm,@Rn */
654 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
655 return;
656 case 0x2002: /* mov.l Rm,@Rn */
657 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
658 return;
659 case 0x6000: /* mov.b @Rm,Rn */
660 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
661 return;
662 case 0x6001: /* mov.w @Rm,Rn */
663 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
664 return;
665 case 0x6002: /* mov.l @Rm,Rn */
666 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
667 return;
668 case 0x2004: /* mov.b Rm,@-Rn */
670 TCGv addr = tcg_temp_new();
671 tcg_gen_subi_i32(addr, REG(B11_8), 1);
672 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
673 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
674 tcg_temp_free(addr);
676 return;
677 case 0x2005: /* mov.w Rm,@-Rn */
679 TCGv addr = tcg_temp_new();
680 tcg_gen_subi_i32(addr, REG(B11_8), 2);
681 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
682 tcg_gen_mov_i32(REG(B11_8), addr);
683 tcg_temp_free(addr);
685 return;
686 case 0x2006: /* mov.l Rm,@-Rn */
688 TCGv addr = tcg_temp_new();
689 tcg_gen_subi_i32(addr, REG(B11_8), 4);
690 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
691 tcg_gen_mov_i32(REG(B11_8), addr);
693 return;
694 case 0x6004: /* mov.b @Rm+,Rn */
695 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
696 if ( B11_8 != B7_4 )
697 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
698 return;
699 case 0x6005: /* mov.w @Rm+,Rn */
700 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
701 if ( B11_8 != B7_4 )
702 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
703 return;
704 case 0x6006: /* mov.l @Rm+,Rn */
705 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
706 if ( B11_8 != B7_4 )
707 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
708 return;
709 case 0x0004: /* mov.b Rm,@(R0,Rn) */
711 TCGv addr = tcg_temp_new();
712 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
713 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
714 tcg_temp_free(addr);
716 return;
717 case 0x0005: /* mov.w Rm,@(R0,Rn) */
719 TCGv addr = tcg_temp_new();
720 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
721 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
722 tcg_temp_free(addr);
724 return;
725 case 0x0006: /* mov.l Rm,@(R0,Rn) */
727 TCGv addr = tcg_temp_new();
728 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
729 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
730 tcg_temp_free(addr);
732 return;
733 case 0x000c: /* mov.b @(R0,Rm),Rn */
735 TCGv addr = tcg_temp_new();
736 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
737 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
738 tcg_temp_free(addr);
740 return;
741 case 0x000d: /* mov.w @(R0,Rm),Rn */
743 TCGv addr = tcg_temp_new();
744 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
745 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
746 tcg_temp_free(addr);
748 return;
749 case 0x000e: /* mov.l @(R0,Rm),Rn */
751 TCGv addr = tcg_temp_new();
752 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
753 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
754 tcg_temp_free(addr);
756 return;
757 case 0x6008: /* swap.b Rm,Rn */
759 TCGv high, low;
760 high = tcg_temp_new();
761 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
762 low = tcg_temp_new();
763 tcg_gen_ext16u_i32(low, REG(B7_4));
764 tcg_gen_bswap16_i32(low, low);
765 tcg_gen_or_i32(REG(B11_8), high, low);
766 tcg_temp_free(low);
767 tcg_temp_free(high);
769 return;
770 case 0x6009: /* swap.w Rm,Rn */
772 TCGv high, low;
773 high = tcg_temp_new();
774 tcg_gen_shli_i32(high, REG(B7_4), 16);
775 low = tcg_temp_new();
776 tcg_gen_shri_i32(low, REG(B7_4), 16);
777 tcg_gen_ext16u_i32(low, low);
778 tcg_gen_or_i32(REG(B11_8), high, low);
779 tcg_temp_free(low);
780 tcg_temp_free(high);
782 return;
783 case 0x200d: /* xtrct Rm,Rn */
785 TCGv high, low;
786 high = tcg_temp_new();
787 tcg_gen_shli_i32(high, REG(B7_4), 16);
788 low = tcg_temp_new();
789 tcg_gen_shri_i32(low, REG(B11_8), 16);
790 tcg_gen_ext16u_i32(low, low);
791 tcg_gen_or_i32(REG(B11_8), high, low);
792 tcg_temp_free(low);
793 tcg_temp_free(high);
795 return;
796 case 0x300c: /* add Rm,Rn */
797 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
798 return;
799 case 0x300e: /* addc Rm,Rn */
800 gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8));
801 return;
802 case 0x300f: /* addv Rm,Rn */
803 gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8));
804 return;
805 case 0x2009: /* and Rm,Rn */
806 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
807 return;
808 case 0x3000: /* cmp/eq Rm,Rn */
809 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
810 return;
811 case 0x3003: /* cmp/ge Rm,Rn */
812 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
813 return;
814 case 0x3007: /* cmp/gt Rm,Rn */
815 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
816 return;
817 case 0x3006: /* cmp/hi Rm,Rn */
818 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
819 return;
820 case 0x3002: /* cmp/hs Rm,Rn */
821 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
822 return;
823 case 0x200c: /* cmp/str Rm,Rn */
825 int label1 = gen_new_label();
826 int label2 = gen_new_label();
827 TCGv cmp1 = tcg_temp_local_new();
828 TCGv cmp2 = tcg_temp_local_new();
829 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
830 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
831 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
832 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
833 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
834 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
835 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
836 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
837 tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1);
838 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
839 tcg_gen_br(label2);
840 gen_set_label(label1);
841 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
842 gen_set_label(label2);
843 tcg_temp_free(cmp2);
844 tcg_temp_free(cmp1);
846 return;
847 case 0x2007: /* div0s Rm,Rn */
849 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
850 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
851 TCGv val = tcg_temp_new();
852 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
853 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
854 tcg_temp_free(val);
856 return;
857 case 0x3004: /* div1 Rm,Rn */
858 gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8));
859 return;
860 case 0x300d: /* dmuls.l Rm,Rn */
862 TCGv_i64 tmp1 = tcg_temp_new_i64();
863 TCGv_i64 tmp2 = tcg_temp_new_i64();
865 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
866 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
867 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
868 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
869 tcg_gen_shri_i64(tmp1, tmp1, 32);
870 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
872 tcg_temp_free_i64(tmp2);
873 tcg_temp_free_i64(tmp1);
875 return;
876 case 0x3005: /* dmulu.l Rm,Rn */
878 TCGv_i64 tmp1 = tcg_temp_new_i64();
879 TCGv_i64 tmp2 = tcg_temp_new_i64();
881 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
882 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
883 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
884 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
885 tcg_gen_shri_i64(tmp1, tmp1, 32);
886 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
888 tcg_temp_free_i64(tmp2);
889 tcg_temp_free_i64(tmp1);
891 return;
892 case 0x600e: /* exts.b Rm,Rn */
893 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
894 return;
895 case 0x600f: /* exts.w Rm,Rn */
896 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
897 return;
898 case 0x600c: /* extu.b Rm,Rn */
899 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
900 return;
901 case 0x600d: /* extu.w Rm,Rn */
902 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
903 return;
904 case 0x000f: /* mac.l @Rm+,@Rn+ */
906 TCGv arg0, arg1;
907 arg0 = tcg_temp_new();
908 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
909 arg1 = tcg_temp_new();
910 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
911 gen_helper_macl(arg0, arg1);
912 tcg_temp_free(arg1);
913 tcg_temp_free(arg0);
914 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
915 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
917 return;
918 case 0x400f: /* mac.w @Rm+,@Rn+ */
920 TCGv arg0, arg1;
921 arg0 = tcg_temp_new();
922 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
923 arg1 = tcg_temp_new();
924 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
925 gen_helper_macw(arg0, arg1);
926 tcg_temp_free(arg1);
927 tcg_temp_free(arg0);
928 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
929 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
931 return;
932 case 0x0007: /* mul.l Rm,Rn */
933 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
934 return;
935 case 0x200f: /* muls.w Rm,Rn */
937 TCGv arg0, arg1;
938 arg0 = tcg_temp_new();
939 tcg_gen_ext16s_i32(arg0, REG(B7_4));
940 arg1 = tcg_temp_new();
941 tcg_gen_ext16s_i32(arg1, REG(B11_8));
942 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
943 tcg_temp_free(arg1);
944 tcg_temp_free(arg0);
946 return;
947 case 0x200e: /* mulu.w Rm,Rn */
949 TCGv arg0, arg1;
950 arg0 = tcg_temp_new();
951 tcg_gen_ext16u_i32(arg0, REG(B7_4));
952 arg1 = tcg_temp_new();
953 tcg_gen_ext16u_i32(arg1, REG(B11_8));
954 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
955 tcg_temp_free(arg1);
956 tcg_temp_free(arg0);
958 return;
959 case 0x600b: /* neg Rm,Rn */
960 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
961 return;
962 case 0x600a: /* negc Rm,Rn */
963 gen_helper_negc(REG(B11_8), REG(B7_4));
964 return;
965 case 0x6007: /* not Rm,Rn */
966 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
967 return;
968 case 0x200b: /* or Rm,Rn */
969 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
970 return;
971 case 0x400c: /* shad Rm,Rn */
973 int label1 = gen_new_label();
974 int label2 = gen_new_label();
975 int label3 = gen_new_label();
976 int label4 = gen_new_label();
977 TCGv shift;
978 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
979 /* Rm positive, shift to the left */
980 shift = tcg_temp_new();
981 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
982 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
983 tcg_temp_free(shift);
984 tcg_gen_br(label4);
985 /* Rm negative, shift to the right */
986 gen_set_label(label1);
987 shift = tcg_temp_new();
988 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
989 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
990 tcg_gen_not_i32(shift, REG(B7_4));
991 tcg_gen_andi_i32(shift, shift, 0x1f);
992 tcg_gen_addi_i32(shift, shift, 1);
993 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
994 tcg_temp_free(shift);
995 tcg_gen_br(label4);
996 /* Rm = -32 */
997 gen_set_label(label2);
998 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
999 tcg_gen_movi_i32(REG(B11_8), 0);
1000 tcg_gen_br(label4);
1001 gen_set_label(label3);
1002 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
1003 gen_set_label(label4);
1005 return;
1006 case 0x400d: /* shld Rm,Rn */
1008 int label1 = gen_new_label();
1009 int label2 = gen_new_label();
1010 int label3 = gen_new_label();
1011 TCGv shift;
1012 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
1013 /* Rm positive, shift to the left */
1014 shift = tcg_temp_new();
1015 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1016 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
1017 tcg_temp_free(shift);
1018 tcg_gen_br(label3);
1019 /* Rm negative, shift to the right */
1020 gen_set_label(label1);
1021 shift = tcg_temp_new();
1022 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1023 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1024 tcg_gen_not_i32(shift, REG(B7_4));
1025 tcg_gen_andi_i32(shift, shift, 0x1f);
1026 tcg_gen_addi_i32(shift, shift, 1);
1027 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1028 tcg_temp_free(shift);
1029 tcg_gen_br(label3);
1030 /* Rm = -32 */
1031 gen_set_label(label2);
1032 tcg_gen_movi_i32(REG(B11_8), 0);
1033 gen_set_label(label3);
1035 return;
1036 case 0x3008: /* sub Rm,Rn */
1037 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1038 return;
1039 case 0x300a: /* subc Rm,Rn */
1040 gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8));
1041 return;
1042 case 0x300b: /* subv Rm,Rn */
1043 gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8));
1044 return;
1045 case 0x2008: /* tst Rm,Rn */
1047 TCGv val = tcg_temp_new();
1048 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1049 gen_cmp_imm(TCG_COND_EQ, val, 0);
1050 tcg_temp_free(val);
1052 return;
1053 case 0x200a: /* xor Rm,Rn */
1054 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1055 return;
1056 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1057 CHECK_FPU_ENABLED
1058 if (ctx->fpscr & FPSCR_SZ) {
1059 TCGv_i64 fp = tcg_temp_new_i64();
1060 gen_load_fpr64(fp, XREG(B7_4));
1061 gen_store_fpr64(fp, XREG(B11_8));
1062 tcg_temp_free_i64(fp);
1063 } else {
1064 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1066 return;
1067 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1068 CHECK_FPU_ENABLED
1069 if (ctx->fpscr & FPSCR_SZ) {
1070 TCGv addr_hi = tcg_temp_new();
1071 int fr = XREG(B7_4);
1072 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1073 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1074 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1075 tcg_temp_free(addr_hi);
1076 } else {
1077 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1079 return;
1080 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1081 CHECK_FPU_ENABLED
1082 if (ctx->fpscr & FPSCR_SZ) {
1083 TCGv addr_hi = tcg_temp_new();
1084 int fr = XREG(B11_8);
1085 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1086 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1087 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1088 tcg_temp_free(addr_hi);
1089 } else {
1090 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1092 return;
1093 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1094 CHECK_FPU_ENABLED
1095 if (ctx->fpscr & FPSCR_SZ) {
1096 TCGv addr_hi = tcg_temp_new();
1097 int fr = XREG(B11_8);
1098 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1099 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1100 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1101 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1102 tcg_temp_free(addr_hi);
1103 } else {
1104 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1105 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1107 return;
1108 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1109 CHECK_FPU_ENABLED
1110 if (ctx->fpscr & FPSCR_SZ) {
1111 TCGv addr = tcg_temp_new_i32();
1112 int fr = XREG(B7_4);
1113 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1114 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1115 tcg_gen_subi_i32(addr, addr, 4);
1116 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1117 tcg_gen_mov_i32(REG(B11_8), addr);
1118 tcg_temp_free(addr);
1119 } else {
1120 TCGv addr;
1121 addr = tcg_temp_new_i32();
1122 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1123 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1124 tcg_gen_mov_i32(REG(B11_8), addr);
1125 tcg_temp_free(addr);
1127 return;
1128 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1129 CHECK_FPU_ENABLED
1131 TCGv addr = tcg_temp_new_i32();
1132 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1133 if (ctx->fpscr & FPSCR_SZ) {
1134 int fr = XREG(B11_8);
1135 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1136 tcg_gen_addi_i32(addr, addr, 4);
1137 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1138 } else {
1139 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1141 tcg_temp_free(addr);
1143 return;
1144 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1145 CHECK_FPU_ENABLED
1147 TCGv addr = tcg_temp_new();
1148 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1149 if (ctx->fpscr & FPSCR_SZ) {
1150 int fr = XREG(B7_4);
1151 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1152 tcg_gen_addi_i32(addr, addr, 4);
1153 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1154 } else {
1155 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1157 tcg_temp_free(addr);
1159 return;
1160 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1161 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1162 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1163 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1164 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1165 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1167 CHECK_FPU_ENABLED
1168 if (ctx->fpscr & FPSCR_PR) {
1169 TCGv_i64 fp0, fp1;
1171 if (ctx->opcode & 0x0110)
1172 break; /* illegal instruction */
1173 fp0 = tcg_temp_new_i64();
1174 fp1 = tcg_temp_new_i64();
1175 gen_load_fpr64(fp0, DREG(B11_8));
1176 gen_load_fpr64(fp1, DREG(B7_4));
1177 switch (ctx->opcode & 0xf00f) {
1178 case 0xf000: /* fadd Rm,Rn */
1179 gen_helper_fadd_DT(fp0, fp0, fp1);
1180 break;
1181 case 0xf001: /* fsub Rm,Rn */
1182 gen_helper_fsub_DT(fp0, fp0, fp1);
1183 break;
1184 case 0xf002: /* fmul Rm,Rn */
1185 gen_helper_fmul_DT(fp0, fp0, fp1);
1186 break;
1187 case 0xf003: /* fdiv Rm,Rn */
1188 gen_helper_fdiv_DT(fp0, fp0, fp1);
1189 break;
1190 case 0xf004: /* fcmp/eq Rm,Rn */
1191 gen_helper_fcmp_eq_DT(fp0, fp1);
1192 return;
1193 case 0xf005: /* fcmp/gt Rm,Rn */
1194 gen_helper_fcmp_gt_DT(fp0, fp1);
1195 return;
1197 gen_store_fpr64(fp0, DREG(B11_8));
1198 tcg_temp_free_i64(fp0);
1199 tcg_temp_free_i64(fp1);
1200 } else {
1201 switch (ctx->opcode & 0xf00f) {
1202 case 0xf000: /* fadd Rm,Rn */
1203 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1204 break;
1205 case 0xf001: /* fsub Rm,Rn */
1206 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1207 break;
1208 case 0xf002: /* fmul Rm,Rn */
1209 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1210 break;
1211 case 0xf003: /* fdiv Rm,Rn */
1212 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1213 break;
1214 case 0xf004: /* fcmp/eq Rm,Rn */
1215 gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1216 return;
1217 case 0xf005: /* fcmp/gt Rm,Rn */
1218 gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1219 return;
1223 return;
1224 case 0xf00e: /* fmac FR0,RM,Rn */
1226 CHECK_FPU_ENABLED
1227 if (ctx->fpscr & FPSCR_PR) {
1228 break; /* illegal instruction */
1229 } else {
1230 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)],
1231 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]);
1232 return;
1237 switch (ctx->opcode & 0xff00) {
1238 case 0xc900: /* and #imm,R0 */
1239 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1240 return;
1241 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1243 TCGv addr, val;
1244 addr = tcg_temp_new();
1245 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1246 val = tcg_temp_new();
1247 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1248 tcg_gen_andi_i32(val, val, B7_0);
1249 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1250 tcg_temp_free(val);
1251 tcg_temp_free(addr);
1253 return;
1254 case 0x8b00: /* bf label */
1255 CHECK_NOT_DELAY_SLOT
1256 gen_conditional_jump(ctx, ctx->pc + 2,
1257 ctx->pc + 4 + B7_0s * 2);
1258 ctx->bstate = BS_BRANCH;
1259 return;
1260 case 0x8f00: /* bf/s label */
1261 CHECK_NOT_DELAY_SLOT
1262 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1263 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1264 return;
1265 case 0x8900: /* bt label */
1266 CHECK_NOT_DELAY_SLOT
1267 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1268 ctx->pc + 2);
1269 ctx->bstate = BS_BRANCH;
1270 return;
1271 case 0x8d00: /* bt/s label */
1272 CHECK_NOT_DELAY_SLOT
1273 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1274 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1275 return;
1276 case 0x8800: /* cmp/eq #imm,R0 */
1277 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1278 return;
1279 case 0xc400: /* mov.b @(disp,GBR),R0 */
1281 TCGv addr = tcg_temp_new();
1282 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1283 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1284 tcg_temp_free(addr);
1286 return;
1287 case 0xc500: /* mov.w @(disp,GBR),R0 */
1289 TCGv addr = tcg_temp_new();
1290 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1291 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1292 tcg_temp_free(addr);
1294 return;
1295 case 0xc600: /* mov.l @(disp,GBR),R0 */
1297 TCGv addr = tcg_temp_new();
1298 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1299 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1300 tcg_temp_free(addr);
1302 return;
1303 case 0xc000: /* mov.b R0,@(disp,GBR) */
1305 TCGv addr = tcg_temp_new();
1306 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1307 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1308 tcg_temp_free(addr);
1310 return;
1311 case 0xc100: /* mov.w R0,@(disp,GBR) */
1313 TCGv addr = tcg_temp_new();
1314 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1315 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1316 tcg_temp_free(addr);
1318 return;
1319 case 0xc200: /* mov.l R0,@(disp,GBR) */
1321 TCGv addr = tcg_temp_new();
1322 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1323 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1324 tcg_temp_free(addr);
1326 return;
1327 case 0x8000: /* mov.b R0,@(disp,Rn) */
1329 TCGv addr = tcg_temp_new();
1330 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1331 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1332 tcg_temp_free(addr);
1334 return;
1335 case 0x8100: /* mov.w R0,@(disp,Rn) */
1337 TCGv addr = tcg_temp_new();
1338 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1339 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1340 tcg_temp_free(addr);
1342 return;
1343 case 0x8400: /* mov.b @(disp,Rn),R0 */
1345 TCGv addr = tcg_temp_new();
1346 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1347 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1348 tcg_temp_free(addr);
1350 return;
1351 case 0x8500: /* mov.w @(disp,Rn),R0 */
1353 TCGv addr = tcg_temp_new();
1354 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1355 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1356 tcg_temp_free(addr);
1358 return;
1359 case 0xc700: /* mova @(disp,PC),R0 */
1360 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1361 return;
1362 case 0xcb00: /* or #imm,R0 */
1363 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1364 return;
1365 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1367 TCGv addr, val;
1368 addr = tcg_temp_new();
1369 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1370 val = tcg_temp_new();
1371 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1372 tcg_gen_ori_i32(val, val, B7_0);
1373 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1374 tcg_temp_free(val);
1375 tcg_temp_free(addr);
1377 return;
1378 case 0xc300: /* trapa #imm */
1380 TCGv imm;
1381 CHECK_NOT_DELAY_SLOT
1382 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1383 imm = tcg_const_i32(B7_0);
1384 gen_helper_trapa(imm);
1385 tcg_temp_free(imm);
1386 ctx->bstate = BS_BRANCH;
1388 return;
1389 case 0xc800: /* tst #imm,R0 */
1391 TCGv val = tcg_temp_new();
1392 tcg_gen_andi_i32(val, REG(0), B7_0);
1393 gen_cmp_imm(TCG_COND_EQ, val, 0);
1394 tcg_temp_free(val);
1396 return;
1397 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1399 TCGv val = tcg_temp_new();
1400 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1401 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1402 tcg_gen_andi_i32(val, val, B7_0);
1403 gen_cmp_imm(TCG_COND_EQ, val, 0);
1404 tcg_temp_free(val);
1406 return;
1407 case 0xca00: /* xor #imm,R0 */
1408 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1409 return;
1410 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1412 TCGv addr, val;
1413 addr = tcg_temp_new();
1414 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1415 val = tcg_temp_new();
1416 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1417 tcg_gen_xori_i32(val, val, B7_0);
1418 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1419 tcg_temp_free(val);
1420 tcg_temp_free(addr);
1422 return;
1425 switch (ctx->opcode & 0xf08f) {
1426 case 0x408e: /* ldc Rm,Rn_BANK */
1427 CHECK_PRIVILEGED
1428 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1429 return;
1430 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1431 CHECK_PRIVILEGED
1432 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1433 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1434 return;
1435 case 0x0082: /* stc Rm_BANK,Rn */
1436 CHECK_PRIVILEGED
1437 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1438 return;
1439 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1440 CHECK_PRIVILEGED
1442 TCGv addr = tcg_temp_new();
1443 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1444 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1445 tcg_gen_mov_i32(REG(B11_8), addr);
1446 tcg_temp_free(addr);
1448 return;
1451 switch (ctx->opcode & 0xf0ff) {
1452 case 0x0023: /* braf Rn */
1453 CHECK_NOT_DELAY_SLOT
1454 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1455 ctx->flags |= DELAY_SLOT;
1456 ctx->delayed_pc = (uint32_t) - 1;
1457 return;
1458 case 0x0003: /* bsrf Rn */
1459 CHECK_NOT_DELAY_SLOT
1460 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1461 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1462 ctx->flags |= DELAY_SLOT;
1463 ctx->delayed_pc = (uint32_t) - 1;
1464 return;
1465 case 0x4015: /* cmp/pl Rn */
1466 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1467 return;
1468 case 0x4011: /* cmp/pz Rn */
1469 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1470 return;
1471 case 0x4010: /* dt Rn */
1472 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1473 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1474 return;
1475 case 0x402b: /* jmp @Rn */
1476 CHECK_NOT_DELAY_SLOT
1477 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1478 ctx->flags |= DELAY_SLOT;
1479 ctx->delayed_pc = (uint32_t) - 1;
1480 return;
1481 case 0x400b: /* jsr @Rn */
1482 CHECK_NOT_DELAY_SLOT
1483 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1484 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1485 ctx->flags |= DELAY_SLOT;
1486 ctx->delayed_pc = (uint32_t) - 1;
1487 return;
1488 case 0x400e: /* ldc Rm,SR */
1489 CHECK_PRIVILEGED
1490 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1491 ctx->bstate = BS_STOP;
1492 return;
1493 case 0x4007: /* ldc.l @Rm+,SR */
1494 CHECK_PRIVILEGED
1496 TCGv val = tcg_temp_new();
1497 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1498 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1499 tcg_temp_free(val);
1500 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1501 ctx->bstate = BS_STOP;
1503 return;
1504 case 0x0002: /* stc SR,Rn */
1505 CHECK_PRIVILEGED
1506 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1507 return;
1508 case 0x4003: /* stc SR,@-Rn */
1509 CHECK_PRIVILEGED
1511 TCGv addr = tcg_temp_new();
1512 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1513 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1514 tcg_gen_mov_i32(REG(B11_8), addr);
1515 tcg_temp_free(addr);
1517 return;
1518 #define LD(reg,ldnum,ldpnum,prechk) \
1519 case ldnum: \
1520 prechk \
1521 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1522 return; \
1523 case ldpnum: \
1524 prechk \
1525 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1526 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1527 return;
1528 #define ST(reg,stnum,stpnum,prechk) \
1529 case stnum: \
1530 prechk \
1531 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1532 return; \
1533 case stpnum: \
1534 prechk \
1536 TCGv addr = tcg_temp_new(); \
1537 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1538 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1539 tcg_gen_mov_i32(REG(B11_8), addr); \
1540 tcg_temp_free(addr); \
1542 return;
1543 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1544 LD(reg,ldnum,ldpnum,prechk) \
1545 ST(reg,stnum,stpnum,prechk)
1546 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1547 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1548 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1549 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1550 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1551 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1552 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1553 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1554 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1555 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1556 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1557 case 0x406a: /* lds Rm,FPSCR */
1558 CHECK_FPU_ENABLED
1559 gen_helper_ld_fpscr(REG(B11_8));
1560 ctx->bstate = BS_STOP;
1561 return;
1562 case 0x4066: /* lds.l @Rm+,FPSCR */
1563 CHECK_FPU_ENABLED
1565 TCGv addr = tcg_temp_new();
1566 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1567 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1568 gen_helper_ld_fpscr(addr);
1569 tcg_temp_free(addr);
1570 ctx->bstate = BS_STOP;
1572 return;
1573 case 0x006a: /* sts FPSCR,Rn */
1574 CHECK_FPU_ENABLED
1575 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1576 return;
1577 case 0x4062: /* sts FPSCR,@-Rn */
1578 CHECK_FPU_ENABLED
1580 TCGv addr, val;
1581 val = tcg_temp_new();
1582 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1583 addr = tcg_temp_new();
1584 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1585 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1586 tcg_gen_mov_i32(REG(B11_8), addr);
1587 tcg_temp_free(addr);
1588 tcg_temp_free(val);
1590 return;
1591 case 0x00c3: /* movca.l R0,@Rm */
1593 TCGv val = tcg_temp_new();
1594 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1595 gen_helper_movcal (REG(B11_8), val);
1596 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1598 ctx->has_movcal = 1;
1599 return;
1600 case 0x40a9:
1601 /* MOVUA.L @Rm,R0 (Rm) -> R0
1602 Load non-boundary-aligned data */
1603 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1604 return;
1605 case 0x40e9:
1606 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1607 Load non-boundary-aligned data */
1608 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1609 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1610 return;
1611 case 0x0029: /* movt Rn */
1612 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1613 return;
1614 case 0x0073:
1615 /* MOVCO.L
1616 LDST -> T
1617 If (T == 1) R0 -> (Rn)
1618 0 -> LDST
1620 if (ctx->features & SH_FEATURE_SH4A) {
1621 int label = gen_new_label();
1622 gen_clr_t();
1623 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1624 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1625 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1626 gen_set_label(label);
1627 tcg_gen_movi_i32(cpu_ldst, 0);
1628 return;
1629 } else
1630 break;
1631 case 0x0063:
1632 /* MOVLI.L @Rm,R0
1633 1 -> LDST
1634 (Rm) -> R0
1635 When interrupt/exception
1636 occurred 0 -> LDST
1638 if (ctx->features & SH_FEATURE_SH4A) {
1639 tcg_gen_movi_i32(cpu_ldst, 0);
1640 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1641 tcg_gen_movi_i32(cpu_ldst, 1);
1642 return;
1643 } else
1644 break;
1645 case 0x0093: /* ocbi @Rn */
1647 gen_helper_ocbi (REG(B11_8));
1649 return;
1650 case 0x00a3: /* ocbp @Rn */
1652 TCGv dummy = tcg_temp_new();
1653 tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
1654 tcg_temp_free(dummy);
1656 return;
1657 case 0x00b3: /* ocbwb @Rn */
1659 TCGv dummy = tcg_temp_new();
1660 tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx);
1661 tcg_temp_free(dummy);
1663 return;
1664 case 0x0083: /* pref @Rn */
1665 return;
1666 case 0x00d3: /* prefi @Rn */
1667 if (ctx->features & SH_FEATURE_SH4A)
1668 return;
1669 else
1670 break;
1671 case 0x00e3: /* icbi @Rn */
1672 if (ctx->features & SH_FEATURE_SH4A)
1673 return;
1674 else
1675 break;
1676 case 0x00ab: /* synco */
1677 if (ctx->features & SH_FEATURE_SH4A)
1678 return;
1679 else
1680 break;
1681 case 0x4024: /* rotcl Rn */
1683 TCGv tmp = tcg_temp_new();
1684 tcg_gen_mov_i32(tmp, cpu_sr);
1685 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1686 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1687 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1688 tcg_temp_free(tmp);
1690 return;
1691 case 0x4025: /* rotcr Rn */
1693 TCGv tmp = tcg_temp_new();
1694 tcg_gen_mov_i32(tmp, cpu_sr);
1695 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1696 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1697 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1698 tcg_temp_free(tmp);
1700 return;
1701 case 0x4004: /* rotl Rn */
1702 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1703 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1704 gen_copy_bit_i32(REG(B11_8), 0, cpu_sr, 0);
1705 return;
1706 case 0x4005: /* rotr Rn */
1707 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1708 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1709 gen_copy_bit_i32(REG(B11_8), 31, cpu_sr, 0);
1710 return;
1711 case 0x4000: /* shll Rn */
1712 case 0x4020: /* shal Rn */
1713 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1714 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1715 return;
1716 case 0x4021: /* shar Rn */
1717 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1718 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1719 return;
1720 case 0x4001: /* shlr Rn */
1721 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1722 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1723 return;
1724 case 0x4008: /* shll2 Rn */
1725 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1726 return;
1727 case 0x4018: /* shll8 Rn */
1728 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1729 return;
1730 case 0x4028: /* shll16 Rn */
1731 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1732 return;
1733 case 0x4009: /* shlr2 Rn */
1734 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1735 return;
1736 case 0x4019: /* shlr8 Rn */
1737 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1738 return;
1739 case 0x4029: /* shlr16 Rn */
1740 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1741 return;
1742 case 0x401b: /* tas.b @Rn */
1744 TCGv addr, val;
1745 addr = tcg_temp_local_new();
1746 tcg_gen_mov_i32(addr, REG(B11_8));
1747 val = tcg_temp_local_new();
1748 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1749 gen_cmp_imm(TCG_COND_EQ, val, 0);
1750 tcg_gen_ori_i32(val, val, 0x80);
1751 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1752 tcg_temp_free(val);
1753 tcg_temp_free(addr);
1755 return;
1756 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1757 CHECK_FPU_ENABLED
1758 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1759 return;
1760 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1761 CHECK_FPU_ENABLED
1762 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1763 return;
1764 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1765 CHECK_FPU_ENABLED
1766 if (ctx->fpscr & FPSCR_PR) {
1767 TCGv_i64 fp;
1768 if (ctx->opcode & 0x0100)
1769 break; /* illegal instruction */
1770 fp = tcg_temp_new_i64();
1771 gen_helper_float_DT(fp, cpu_fpul);
1772 gen_store_fpr64(fp, DREG(B11_8));
1773 tcg_temp_free_i64(fp);
1775 else {
1776 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul);
1778 return;
1779 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1780 CHECK_FPU_ENABLED
1781 if (ctx->fpscr & FPSCR_PR) {
1782 TCGv_i64 fp;
1783 if (ctx->opcode & 0x0100)
1784 break; /* illegal instruction */
1785 fp = tcg_temp_new_i64();
1786 gen_load_fpr64(fp, DREG(B11_8));
1787 gen_helper_ftrc_DT(cpu_fpul, fp);
1788 tcg_temp_free_i64(fp);
1790 else {
1791 gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1793 return;
1794 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1795 CHECK_FPU_ENABLED
1797 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1799 return;
1800 case 0xf05d: /* fabs FRn/DRn */
1801 CHECK_FPU_ENABLED
1802 if (ctx->fpscr & FPSCR_PR) {
1803 if (ctx->opcode & 0x0100)
1804 break; /* illegal instruction */
1805 TCGv_i64 fp = tcg_temp_new_i64();
1806 gen_load_fpr64(fp, DREG(B11_8));
1807 gen_helper_fabs_DT(fp, fp);
1808 gen_store_fpr64(fp, DREG(B11_8));
1809 tcg_temp_free_i64(fp);
1810 } else {
1811 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1813 return;
1814 case 0xf06d: /* fsqrt FRn */
1815 CHECK_FPU_ENABLED
1816 if (ctx->fpscr & FPSCR_PR) {
1817 if (ctx->opcode & 0x0100)
1818 break; /* illegal instruction */
1819 TCGv_i64 fp = tcg_temp_new_i64();
1820 gen_load_fpr64(fp, DREG(B11_8));
1821 gen_helper_fsqrt_DT(fp, fp);
1822 gen_store_fpr64(fp, DREG(B11_8));
1823 tcg_temp_free_i64(fp);
1824 } else {
1825 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1827 return;
1828 case 0xf07d: /* fsrra FRn */
1829 CHECK_FPU_ENABLED
1830 break;
1831 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1832 CHECK_FPU_ENABLED
1833 if (!(ctx->fpscr & FPSCR_PR)) {
1834 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1836 return;
1837 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1838 CHECK_FPU_ENABLED
1839 if (!(ctx->fpscr & FPSCR_PR)) {
1840 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1842 return;
1843 case 0xf0ad: /* fcnvsd FPUL,DRn */
1844 CHECK_FPU_ENABLED
1846 TCGv_i64 fp = tcg_temp_new_i64();
1847 gen_helper_fcnvsd_FT_DT(fp, cpu_fpul);
1848 gen_store_fpr64(fp, DREG(B11_8));
1849 tcg_temp_free_i64(fp);
1851 return;
1852 case 0xf0bd: /* fcnvds DRn,FPUL */
1853 CHECK_FPU_ENABLED
1855 TCGv_i64 fp = tcg_temp_new_i64();
1856 gen_load_fpr64(fp, DREG(B11_8));
1857 gen_helper_fcnvds_DT_FT(cpu_fpul, fp);
1858 tcg_temp_free_i64(fp);
1860 return;
1862 #if 0
1863 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1864 ctx->opcode, ctx->pc);
1865 fflush(stderr);
1866 #endif
1867 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1868 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1869 gen_helper_raise_slot_illegal_instruction();
1870 } else {
1871 gen_helper_raise_illegal_instruction();
1873 ctx->bstate = BS_EXCP;
1876 static void decode_opc(DisasContext * ctx)
1878 uint32_t old_flags = ctx->flags;
1880 _decode_opc(ctx);
1882 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1883 if (ctx->flags & DELAY_SLOT_CLEARME) {
1884 gen_store_flags(0);
1885 } else {
1886 /* go out of the delay slot */
1887 uint32_t new_flags = ctx->flags;
1888 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1889 gen_store_flags(new_flags);
1891 ctx->flags = 0;
1892 ctx->bstate = BS_BRANCH;
1893 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1894 gen_delayed_conditional_jump(ctx);
1895 } else if (old_flags & DELAY_SLOT) {
1896 gen_jump(ctx);
1901 /* go into a delay slot */
1902 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1903 gen_store_flags(ctx->flags);
1906 static inline void
1907 gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
1908 int search_pc)
1910 DisasContext ctx;
1911 target_ulong pc_start;
1912 static uint16_t *gen_opc_end;
1913 CPUBreakpoint *bp;
1914 int i, ii;
1915 int num_insns;
1916 int max_insns;
1918 pc_start = tb->pc;
1919 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1920 ctx.pc = pc_start;
1921 ctx.flags = (uint32_t)tb->flags;
1922 ctx.bstate = BS_NONE;
1923 ctx.sr = env->sr;
1924 ctx.fpscr = env->fpscr;
1925 ctx.memidx = (env->sr & SR_MD) == 0 ? 1 : 0;
1926 /* We don't know if the delayed pc came from a dynamic or static branch,
1927 so assume it is a dynamic branch. */
1928 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1929 ctx.tb = tb;
1930 ctx.singlestep_enabled = env->singlestep_enabled;
1931 ctx.features = env->features;
1932 ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);
1934 ii = -1;
1935 num_insns = 0;
1936 max_insns = tb->cflags & CF_COUNT_MASK;
1937 if (max_insns == 0)
1938 max_insns = CF_COUNT_MASK;
1939 gen_icount_start();
1940 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1941 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1942 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1943 if (ctx.pc == bp->pc) {
1944 /* We have hit a breakpoint - make sure PC is up-to-date */
1945 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1946 gen_helper_debug();
1947 ctx.bstate = BS_EXCP;
1948 break;
1952 if (search_pc) {
1953 i = gen_opc_ptr - gen_opc_buf;
1954 if (ii < i) {
1955 ii++;
1956 while (ii < i)
1957 gen_opc_instr_start[ii++] = 0;
1959 gen_opc_pc[ii] = ctx.pc;
1960 gen_opc_hflags[ii] = ctx.flags;
1961 gen_opc_instr_start[ii] = 1;
1962 gen_opc_icount[ii] = num_insns;
1964 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1965 gen_io_start();
1966 #if 0
1967 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1968 fflush(stderr);
1969 #endif
1970 ctx.opcode = lduw_code(ctx.pc);
1971 decode_opc(&ctx);
1972 num_insns++;
1973 ctx.pc += 2;
1974 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1975 break;
1976 if (env->singlestep_enabled)
1977 break;
1978 if (num_insns >= max_insns)
1979 break;
1980 if (singlestep)
1981 break;
1983 if (tb->cflags & CF_LAST_IO)
1984 gen_io_end();
1985 if (env->singlestep_enabled) {
1986 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1987 gen_helper_debug();
1988 } else {
1989 switch (ctx.bstate) {
1990 case BS_STOP:
1991 /* gen_op_interrupt_restart(); */
1992 /* fall through */
1993 case BS_NONE:
1994 if (ctx.flags) {
1995 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1997 gen_goto_tb(&ctx, 0, ctx.pc);
1998 break;
1999 case BS_EXCP:
2000 /* gen_op_interrupt_restart(); */
2001 tcg_gen_exit_tb(0);
2002 break;
2003 case BS_BRANCH:
2004 default:
2005 break;
2009 gen_icount_end(tb, num_insns);
2010 *gen_opc_ptr = INDEX_op_end;
2011 if (search_pc) {
2012 i = gen_opc_ptr - gen_opc_buf;
2013 ii++;
2014 while (ii <= i)
2015 gen_opc_instr_start[ii++] = 0;
2016 } else {
2017 tb->size = ctx.pc - pc_start;
2018 tb->icount = num_insns;
2021 #ifdef DEBUG_DISAS
2022 #ifdef SH4_DEBUG_DISAS
2023 qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
2024 #endif
2025 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2026 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2027 log_target_disas(pc_start, ctx.pc - pc_start, 0);
2028 qemu_log("\n");
2030 #endif
2033 void gen_intermediate_code(CPUState * env, struct TranslationBlock *tb)
2035 gen_intermediate_code_internal(env, tb, 0);
2038 void gen_intermediate_code_pc(CPUState * env, struct TranslationBlock *tb)
2040 gen_intermediate_code_internal(env, tb, 1);
2043 void gen_pc_load(CPUState *env, TranslationBlock *tb,
2044 unsigned long searched_pc, int pc_pos, void *puc)
2046 env->pc = gen_opc_pc[pc_pos];
2047 env->flags = gen_opc_hflags[pc_pos];