pci: add accessors to get/set registers by mask
[qemu/ar7.git] / target-sh4 / translate.c
blobaacf96d9a374c3a9512f47db985901ecfd60e5ab
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
25 #define DEBUG_DISAS
26 #define SH4_DEBUG_DISAS
27 //#define SH4_SINGLE_STEP
29 #include "cpu.h"
30 #include "disas.h"
31 #include "tcg-op.h"
32 #include "qemu-common.h"
34 #include "helper.h"
35 #define GEN_HELPER 1
36 #include "helper.h"
38 typedef struct DisasContext {
39 struct TranslationBlock *tb;
40 target_ulong pc;
41 uint32_t sr;
42 uint32_t fpscr;
43 uint16_t opcode;
44 uint32_t flags;
45 int bstate;
46 int memidx;
47 uint32_t delayed_pc;
48 int singlestep_enabled;
49 uint32_t features;
50 int has_movcal;
51 } DisasContext;
/* Nonzero when translating user-mode code.  In user-only emulation
   everything is user mode; otherwise test the SR.MD privilege bit. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->sr & SR_MD))
#endif
/* Block-termination states stored in DisasContext.bstate. */
enum {
    BS_NONE   = 0, /* We go out of the TB without reaching a branch or an
                    * exception condition */
    BS_STOP   = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP   = 3, /* We reached an exception condition */
};
68 /* global register indexes */
69 static TCGv_ptr cpu_env;
70 static TCGv cpu_gregs[24];
71 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
72 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
73 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
74 static TCGv cpu_fregs[32];
76 /* internal register indexes */
77 static TCGv cpu_flags, cpu_delayed_pc;
79 static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
81 #include "gen-icount.h"
83 static void sh4_translate_init(void)
85 int i;
86 static int done_init = 0;
87 static const char * const gregnames[24] = {
88 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
89 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
90 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
91 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
92 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
94 static const char * const fregnames[32] = {
95 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
96 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
97 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
98 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
99 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
100 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
101 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
102 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
105 if (done_init)
106 return;
108 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
110 for (i = 0; i < 24; i++)
111 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
112 offsetof(CPUState, gregs[i]),
113 gregnames[i]);
115 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
116 offsetof(CPUState, pc), "PC");
117 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUState, sr), "SR");
119 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUState, ssr), "SSR");
121 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUState, spc), "SPC");
123 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUState, gbr), "GBR");
125 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUState, vbr), "VBR");
127 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUState, sgr), "SGR");
129 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
130 offsetof(CPUState, dbr), "DBR");
131 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
132 offsetof(CPUState, mach), "MACH");
133 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
134 offsetof(CPUState, macl), "MACL");
135 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
136 offsetof(CPUState, pr), "PR");
137 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
138 offsetof(CPUState, fpscr), "FPSCR");
139 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
140 offsetof(CPUState, fpul), "FPUL");
142 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
143 offsetof(CPUState, flags), "_flags_");
144 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
145 offsetof(CPUState, delayed_pc),
146 "_delayed_pc_");
147 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
148 offsetof(CPUState, ldst), "_ldst_");
150 for (i = 0; i < 32; i++)
151 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
152 offsetof(CPUState, fregs[i]),
153 fregnames[i]);
155 /* register helpers */
156 #define GEN_HELPER 2
157 #include "helper.h"
159 done_init = 1;
162 void cpu_dump_state(CPUState * env, FILE * f,
163 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
164 int flags)
166 int i;
167 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
168 env->pc, env->sr, env->pr, env->fpscr);
169 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
170 env->spc, env->ssr, env->gbr, env->vbr);
171 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
172 env->sgr, env->dbr, env->delayed_pc, env->fpul);
173 for (i = 0; i < 24; i += 4) {
174 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
175 i, env->gregs[i], i + 1, env->gregs[i + 1],
176 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
178 if (env->flags & DELAY_SLOT) {
179 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
180 env->delayed_pc);
181 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
182 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
183 env->delayed_pc);
187 void cpu_reset(CPUSH4State * env)
189 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
190 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
191 log_cpu_state(env, 0);
194 memset(env, 0, offsetof(CPUSH4State, breakpoints));
195 tlb_flush(env, 1);
197 env->pc = 0xA0000000;
198 #if defined(CONFIG_USER_ONLY)
199 env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
200 set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
201 #else
202 env->sr = SR_MD | SR_RB | SR_BL | SR_I3 | SR_I2 | SR_I1 | SR_I0;
203 env->fpscr = FPSCR_DN | FPSCR_RM_ZERO; /* CPU reset value according to SH4 manual */
204 set_float_rounding_mode(float_round_to_zero, &env->fp_status);
205 set_flush_to_zero(1, &env->fp_status);
206 #endif
207 set_default_nan_mode(1, &env->fp_status);
/* Static description of one supported SH4 CPU model. */
typedef struct {
    const char *name;     /* model name used on the command line */
    int id;               /* SH_CPU_* identifier */
    uint32_t pvr;         /* processor version register */
    uint32_t prr;         /* product register */
    uint32_t cvr;         /* cache version register */
    uint32_t features;    /* SH_FEATURE_* capability bits */
} sh4_def_t;
219 static sh4_def_t sh4_defs[] = {
221 .name = "SH7750R",
222 .id = SH_CPU_SH7750R,
223 .pvr = 0x00050000,
224 .prr = 0x00000100,
225 .cvr = 0x00110000,
226 .features = SH_FEATURE_BCR3_AND_BCR4,
227 }, {
228 .name = "SH7751R",
229 .id = SH_CPU_SH7751R,
230 .pvr = 0x04050005,
231 .prr = 0x00000113,
232 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
233 .features = SH_FEATURE_BCR3_AND_BCR4,
234 }, {
235 .name = "SH7785",
236 .id = SH_CPU_SH7785,
237 .pvr = 0x10300700,
238 .prr = 0x00000200,
239 .cvr = 0x71440211,
240 .features = SH_FEATURE_SH4A,
244 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
246 int i;
248 if (strcasecmp(name, "any") == 0)
249 return &sh4_defs[0];
251 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
252 if (strcasecmp(name, sh4_defs[i].name) == 0)
253 return &sh4_defs[i];
255 return NULL;
258 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
260 int i;
262 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
263 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
266 static void cpu_register(CPUSH4State *env, const sh4_def_t *def)
268 env->pvr = def->pvr;
269 env->prr = def->prr;
270 env->cvr = def->cvr;
271 env->id = def->id;
274 CPUSH4State *cpu_sh4_init(const char *cpu_model)
276 CPUSH4State *env;
277 const sh4_def_t *def;
279 def = cpu_sh4_find_by_name(cpu_model);
280 if (!def)
281 return NULL;
282 env = g_malloc0(sizeof(CPUSH4State));
283 env->features = def->features;
284 cpu_exec_init(env);
285 env->movcal_backup_tail = &(env->movcal_backup);
286 sh4_translate_init();
287 env->cpu_model_str = cpu_model;
288 cpu_reset(env);
289 cpu_register(env, def);
290 qemu_init_vcpu(env);
291 return env;
294 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
296 TranslationBlock *tb;
297 tb = ctx->tb;
299 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
300 !ctx->singlestep_enabled) {
301 /* Use a direct jump if in same page and singlestep not enabled */
302 tcg_gen_goto_tb(n);
303 tcg_gen_movi_i32(cpu_pc, dest);
304 tcg_gen_exit_tb((tcg_target_long)tb + n);
305 } else {
306 tcg_gen_movi_i32(cpu_pc, dest);
307 if (ctx->singlestep_enabled)
308 gen_helper_debug();
309 tcg_gen_exit_tb(0);
313 static void gen_jump(DisasContext * ctx)
315 if (ctx->delayed_pc == (uint32_t) - 1) {
316 /* Target is not statically known, it comes necessarily from a
317 delayed jump as immediate jump are conditinal jumps */
318 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
319 if (ctx->singlestep_enabled)
320 gen_helper_debug();
321 tcg_gen_exit_tb(0);
322 } else {
323 gen_goto_tb(ctx, 0, ctx->delayed_pc);
327 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
329 TCGv sr;
330 int label = gen_new_label();
331 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
332 sr = tcg_temp_new();
333 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
334 tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
335 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
336 gen_set_label(label);
339 /* Immediate conditional jump (bt or bf) */
340 static void gen_conditional_jump(DisasContext * ctx,
341 target_ulong ift, target_ulong ifnott)
343 int l1;
344 TCGv sr;
346 l1 = gen_new_label();
347 sr = tcg_temp_new();
348 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
349 tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
350 gen_goto_tb(ctx, 0, ifnott);
351 gen_set_label(l1);
352 gen_goto_tb(ctx, 1, ift);
355 /* Delayed conditional jump (bt or bf) */
356 static void gen_delayed_conditional_jump(DisasContext * ctx)
358 int l1;
359 TCGv ds;
361 l1 = gen_new_label();
362 ds = tcg_temp_new();
363 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
364 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
365 gen_goto_tb(ctx, 1, ctx->pc + 2);
366 gen_set_label(l1);
367 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
368 gen_jump(ctx);
371 static inline void gen_set_t(void)
373 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
376 static inline void gen_clr_t(void)
378 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
381 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
383 TCGv t;
385 t = tcg_temp_new();
386 tcg_gen_setcond_i32(cond, t, t1, t0);
387 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
388 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
390 tcg_temp_free(t);
393 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
395 TCGv t;
397 t = tcg_temp_new();
398 tcg_gen_setcondi_i32(cond, t, t0, imm);
399 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
400 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
402 tcg_temp_free(t);
405 static inline void gen_store_flags(uint32_t flags)
407 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
408 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
411 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
413 TCGv tmp = tcg_temp_new();
415 p0 &= 0x1f;
416 p1 &= 0x1f;
418 tcg_gen_andi_i32(tmp, t1, (1 << p1));
419 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
420 if (p0 < p1)
421 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
422 else if (p0 > p1)
423 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
424 tcg_gen_or_i32(t0, t0, tmp);
426 tcg_temp_free(tmp);
429 static inline void gen_load_fpr64(TCGv_i64 t, int reg)
431 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
434 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
436 TCGv_i32 tmp = tcg_temp_new_i32();
437 tcg_gen_trunc_i64_i32(tmp, t);
438 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
439 tcg_gen_shri_i64(t, t, 32);
440 tcg_gen_trunc_i64_i32(tmp, t);
441 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
442 tcg_temp_free_i32(tmp);
/* Instruction field extraction helpers; Bx_y means bits x..y of the
   16-bit opcode, the "s" suffix marks sign extension. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General register access honouring the R0-R7 banking selected by
   SR.MD and SR.RB; ALTREG selects the opposite bank. */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
  (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
  ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register access honouring the FPSCR.FR bank select; XREG applies
   the double-precision pairing trick, DREG assumes an even index. */
#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Abort decoding with an illegal-slot exception when the current
   instruction is not allowed inside a branch delay slot. */
#define CHECK_NOT_DELAY_SLOT                                        \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {         \
      gen_helper_raise_slot_illegal_instruction();                  \
      ctx->bstate = BS_EXCP;                                        \
      return;                                                       \
  }

/* Abort decoding with an illegal-instruction exception (slot variant
   inside a delay slot) when translating user-mode code. */
#define CHECK_PRIVILEGED                                            \
  if (IS_USER(ctx)) {                                               \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
          gen_helper_raise_slot_illegal_instruction();              \
      } else {                                                      \
          gen_helper_raise_illegal_instruction();                   \
      }                                                             \
      ctx->bstate = BS_EXCP;                                        \
      return;                                                       \
  }

/* Abort decoding with an FPU-disabled exception (slot variant inside
   a delay slot) when SR.FD is set. */
#define CHECK_FPU_ENABLED                                           \
  if (ctx->flags & SR_FD) {                                         \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
          gen_helper_raise_slot_fpu_disable();                      \
      } else {                                                      \
          gen_helper_raise_fpu_disable();                           \
      }                                                             \
      ctx->bstate = BS_EXCP;                                        \
      return;                                                       \
  }
496 static void _decode_opc(DisasContext * ctx)
498 /* This code tries to make movcal emulation sufficiently
499 accurate for Linux purposes. This instruction writes
500 memory, and prior to that, always allocates a cache line.
501 It is used in two contexts:
502 - in memcpy, where data is copied in blocks, the first write
503 of to a block uses movca.l for performance.
504 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
505 to flush the cache. Here, the data written by movcal.l is never
506 written to memory, and the data written is just bogus.
508 To simulate this, we simulate movcal.l, we store the value to memory,
509 but we also remember the previous content. If we see ocbi, we check
510 if movcal.l for that address was done previously. If so, the write should
511 not have hit the memory, so we restore the previous content.
512 When we see an instruction that is neither movca.l
513 nor ocbi, the previous content is discarded.
515 To optimize, we only try to flush stores when we're at the start of
516 TB, or if we already saw movca.l in this TB and did not flush stores
517 yet. */
518 if (ctx->has_movcal)
520 int opcode = ctx->opcode & 0xf0ff;
521 if (opcode != 0x0093 /* ocbi */
522 && opcode != 0x00c3 /* movca.l */)
524 gen_helper_discard_movcal_backup ();
525 ctx->has_movcal = 0;
529 #if 0
530 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
531 #endif
533 switch (ctx->opcode) {
534 case 0x0019: /* div0u */
535 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
536 return;
537 case 0x000b: /* rts */
538 CHECK_NOT_DELAY_SLOT
539 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
540 ctx->flags |= DELAY_SLOT;
541 ctx->delayed_pc = (uint32_t) - 1;
542 return;
543 case 0x0028: /* clrmac */
544 tcg_gen_movi_i32(cpu_mach, 0);
545 tcg_gen_movi_i32(cpu_macl, 0);
546 return;
547 case 0x0048: /* clrs */
548 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
549 return;
550 case 0x0008: /* clrt */
551 gen_clr_t();
552 return;
553 case 0x0038: /* ldtlb */
554 CHECK_PRIVILEGED
555 gen_helper_ldtlb();
556 return;
557 case 0x002b: /* rte */
558 CHECK_PRIVILEGED
559 CHECK_NOT_DELAY_SLOT
560 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
561 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
562 ctx->flags |= DELAY_SLOT;
563 ctx->delayed_pc = (uint32_t) - 1;
564 return;
565 case 0x0058: /* sets */
566 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
567 return;
568 case 0x0018: /* sett */
569 gen_set_t();
570 return;
571 case 0xfbfd: /* frchg */
572 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
573 ctx->bstate = BS_STOP;
574 return;
575 case 0xf3fd: /* fschg */
576 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
577 ctx->bstate = BS_STOP;
578 return;
579 case 0x0009: /* nop */
580 return;
581 case 0x001b: /* sleep */
582 CHECK_PRIVILEGED
583 gen_helper_sleep(tcg_const_i32(ctx->pc + 2));
584 return;
587 switch (ctx->opcode & 0xf000) {
588 case 0x1000: /* mov.l Rm,@(disp,Rn) */
590 TCGv addr = tcg_temp_new();
591 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
592 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
593 tcg_temp_free(addr);
595 return;
596 case 0x5000: /* mov.l @(disp,Rm),Rn */
598 TCGv addr = tcg_temp_new();
599 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
600 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
601 tcg_temp_free(addr);
603 return;
604 case 0xe000: /* mov #imm,Rn */
605 tcg_gen_movi_i32(REG(B11_8), B7_0s);
606 return;
607 case 0x9000: /* mov.w @(disp,PC),Rn */
609 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
610 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
611 tcg_temp_free(addr);
613 return;
614 case 0xd000: /* mov.l @(disp,PC),Rn */
616 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
617 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
618 tcg_temp_free(addr);
620 return;
621 case 0x7000: /* add #imm,Rn */
622 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
623 return;
624 case 0xa000: /* bra disp */
625 CHECK_NOT_DELAY_SLOT
626 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
627 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
628 ctx->flags |= DELAY_SLOT;
629 return;
630 case 0xb000: /* bsr disp */
631 CHECK_NOT_DELAY_SLOT
632 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
633 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
634 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
635 ctx->flags |= DELAY_SLOT;
636 return;
639 switch (ctx->opcode & 0xf00f) {
640 case 0x6003: /* mov Rm,Rn */
641 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
642 return;
643 case 0x2000: /* mov.b Rm,@Rn */
644 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
645 return;
646 case 0x2001: /* mov.w Rm,@Rn */
647 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
648 return;
649 case 0x2002: /* mov.l Rm,@Rn */
650 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
651 return;
652 case 0x6000: /* mov.b @Rm,Rn */
653 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
654 return;
655 case 0x6001: /* mov.w @Rm,Rn */
656 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
657 return;
658 case 0x6002: /* mov.l @Rm,Rn */
659 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
660 return;
661 case 0x2004: /* mov.b Rm,@-Rn */
663 TCGv addr = tcg_temp_new();
664 tcg_gen_subi_i32(addr, REG(B11_8), 1);
665 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
666 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
667 tcg_temp_free(addr);
669 return;
670 case 0x2005: /* mov.w Rm,@-Rn */
672 TCGv addr = tcg_temp_new();
673 tcg_gen_subi_i32(addr, REG(B11_8), 2);
674 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
675 tcg_gen_mov_i32(REG(B11_8), addr);
676 tcg_temp_free(addr);
678 return;
679 case 0x2006: /* mov.l Rm,@-Rn */
681 TCGv addr = tcg_temp_new();
682 tcg_gen_subi_i32(addr, REG(B11_8), 4);
683 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
684 tcg_gen_mov_i32(REG(B11_8), addr);
686 return;
687 case 0x6004: /* mov.b @Rm+,Rn */
688 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
689 if ( B11_8 != B7_4 )
690 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
691 return;
692 case 0x6005: /* mov.w @Rm+,Rn */
693 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
694 if ( B11_8 != B7_4 )
695 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
696 return;
697 case 0x6006: /* mov.l @Rm+,Rn */
698 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
699 if ( B11_8 != B7_4 )
700 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
701 return;
702 case 0x0004: /* mov.b Rm,@(R0,Rn) */
704 TCGv addr = tcg_temp_new();
705 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
706 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
707 tcg_temp_free(addr);
709 return;
710 case 0x0005: /* mov.w Rm,@(R0,Rn) */
712 TCGv addr = tcg_temp_new();
713 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
714 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
715 tcg_temp_free(addr);
717 return;
718 case 0x0006: /* mov.l Rm,@(R0,Rn) */
720 TCGv addr = tcg_temp_new();
721 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
722 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
723 tcg_temp_free(addr);
725 return;
726 case 0x000c: /* mov.b @(R0,Rm),Rn */
728 TCGv addr = tcg_temp_new();
729 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
730 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
731 tcg_temp_free(addr);
733 return;
734 case 0x000d: /* mov.w @(R0,Rm),Rn */
736 TCGv addr = tcg_temp_new();
737 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
738 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
739 tcg_temp_free(addr);
741 return;
742 case 0x000e: /* mov.l @(R0,Rm),Rn */
744 TCGv addr = tcg_temp_new();
745 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
746 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
747 tcg_temp_free(addr);
749 return;
750 case 0x6008: /* swap.b Rm,Rn */
752 TCGv high, low;
753 high = tcg_temp_new();
754 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
755 low = tcg_temp_new();
756 tcg_gen_ext16u_i32(low, REG(B7_4));
757 tcg_gen_bswap16_i32(low, low);
758 tcg_gen_or_i32(REG(B11_8), high, low);
759 tcg_temp_free(low);
760 tcg_temp_free(high);
762 return;
763 case 0x6009: /* swap.w Rm,Rn */
765 TCGv high, low;
766 high = tcg_temp_new();
767 tcg_gen_shli_i32(high, REG(B7_4), 16);
768 low = tcg_temp_new();
769 tcg_gen_shri_i32(low, REG(B7_4), 16);
770 tcg_gen_ext16u_i32(low, low);
771 tcg_gen_or_i32(REG(B11_8), high, low);
772 tcg_temp_free(low);
773 tcg_temp_free(high);
775 return;
776 case 0x200d: /* xtrct Rm,Rn */
778 TCGv high, low;
779 high = tcg_temp_new();
780 tcg_gen_shli_i32(high, REG(B7_4), 16);
781 low = tcg_temp_new();
782 tcg_gen_shri_i32(low, REG(B11_8), 16);
783 tcg_gen_ext16u_i32(low, low);
784 tcg_gen_or_i32(REG(B11_8), high, low);
785 tcg_temp_free(low);
786 tcg_temp_free(high);
788 return;
789 case 0x300c: /* add Rm,Rn */
790 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
791 return;
792 case 0x300e: /* addc Rm,Rn */
793 gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8));
794 return;
795 case 0x300f: /* addv Rm,Rn */
796 gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8));
797 return;
798 case 0x2009: /* and Rm,Rn */
799 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
800 return;
801 case 0x3000: /* cmp/eq Rm,Rn */
802 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
803 return;
804 case 0x3003: /* cmp/ge Rm,Rn */
805 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
806 return;
807 case 0x3007: /* cmp/gt Rm,Rn */
808 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
809 return;
810 case 0x3006: /* cmp/hi Rm,Rn */
811 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
812 return;
813 case 0x3002: /* cmp/hs Rm,Rn */
814 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
815 return;
816 case 0x200c: /* cmp/str Rm,Rn */
818 TCGv cmp1 = tcg_temp_new();
819 TCGv cmp2 = tcg_temp_new();
820 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
821 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
822 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
823 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
824 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
825 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
826 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
827 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
828 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
829 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
830 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
831 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
832 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
833 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
834 tcg_temp_free(cmp2);
835 tcg_temp_free(cmp1);
837 return;
838 case 0x2007: /* div0s Rm,Rn */
840 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
841 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
842 TCGv val = tcg_temp_new();
843 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
844 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
845 tcg_temp_free(val);
847 return;
848 case 0x3004: /* div1 Rm,Rn */
849 gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8));
850 return;
851 case 0x300d: /* dmuls.l Rm,Rn */
853 TCGv_i64 tmp1 = tcg_temp_new_i64();
854 TCGv_i64 tmp2 = tcg_temp_new_i64();
856 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
857 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
858 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
859 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
860 tcg_gen_shri_i64(tmp1, tmp1, 32);
861 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
863 tcg_temp_free_i64(tmp2);
864 tcg_temp_free_i64(tmp1);
866 return;
867 case 0x3005: /* dmulu.l Rm,Rn */
869 TCGv_i64 tmp1 = tcg_temp_new_i64();
870 TCGv_i64 tmp2 = tcg_temp_new_i64();
872 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
873 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
874 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
875 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
876 tcg_gen_shri_i64(tmp1, tmp1, 32);
877 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
879 tcg_temp_free_i64(tmp2);
880 tcg_temp_free_i64(tmp1);
882 return;
883 case 0x600e: /* exts.b Rm,Rn */
884 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
885 return;
886 case 0x600f: /* exts.w Rm,Rn */
887 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
888 return;
889 case 0x600c: /* extu.b Rm,Rn */
890 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
891 return;
892 case 0x600d: /* extu.w Rm,Rn */
893 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
894 return;
895 case 0x000f: /* mac.l @Rm+,@Rn+ */
897 TCGv arg0, arg1;
898 arg0 = tcg_temp_new();
899 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
900 arg1 = tcg_temp_new();
901 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
902 gen_helper_macl(arg0, arg1);
903 tcg_temp_free(arg1);
904 tcg_temp_free(arg0);
905 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
906 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
908 return;
909 case 0x400f: /* mac.w @Rm+,@Rn+ */
911 TCGv arg0, arg1;
912 arg0 = tcg_temp_new();
913 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
914 arg1 = tcg_temp_new();
915 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
916 gen_helper_macw(arg0, arg1);
917 tcg_temp_free(arg1);
918 tcg_temp_free(arg0);
919 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
920 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
922 return;
923 case 0x0007: /* mul.l Rm,Rn */
924 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
925 return;
926 case 0x200f: /* muls.w Rm,Rn */
928 TCGv arg0, arg1;
929 arg0 = tcg_temp_new();
930 tcg_gen_ext16s_i32(arg0, REG(B7_4));
931 arg1 = tcg_temp_new();
932 tcg_gen_ext16s_i32(arg1, REG(B11_8));
933 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
934 tcg_temp_free(arg1);
935 tcg_temp_free(arg0);
937 return;
938 case 0x200e: /* mulu.w Rm,Rn */
940 TCGv arg0, arg1;
941 arg0 = tcg_temp_new();
942 tcg_gen_ext16u_i32(arg0, REG(B7_4));
943 arg1 = tcg_temp_new();
944 tcg_gen_ext16u_i32(arg1, REG(B11_8));
945 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
946 tcg_temp_free(arg1);
947 tcg_temp_free(arg0);
949 return;
950 case 0x600b: /* neg Rm,Rn */
951 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
952 return;
953 case 0x600a: /* negc Rm,Rn */
955 TCGv t0, t1;
956 t0 = tcg_temp_new();
957 tcg_gen_neg_i32(t0, REG(B7_4));
958 t1 = tcg_temp_new();
959 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
960 tcg_gen_sub_i32(REG(B11_8), t0, t1);
961 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
962 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
963 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
964 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
965 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
966 tcg_temp_free(t0);
967 tcg_temp_free(t1);
969 return;
970 case 0x6007: /* not Rm,Rn */
971 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
972 return;
973 case 0x200b: /* or Rm,Rn */
974 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
975 return;
976 case 0x400c: /* shad Rm,Rn */
978 int label1 = gen_new_label();
979 int label2 = gen_new_label();
980 int label3 = gen_new_label();
981 int label4 = gen_new_label();
982 TCGv shift;
983 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
984 /* Rm positive, shift to the left */
985 shift = tcg_temp_new();
986 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
987 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
988 tcg_temp_free(shift);
989 tcg_gen_br(label4);
990 /* Rm negative, shift to the right */
991 gen_set_label(label1);
992 shift = tcg_temp_new();
993 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
994 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
995 tcg_gen_not_i32(shift, REG(B7_4));
996 tcg_gen_andi_i32(shift, shift, 0x1f);
997 tcg_gen_addi_i32(shift, shift, 1);
998 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
999 tcg_temp_free(shift);
1000 tcg_gen_br(label4);
1001 /* Rm = -32 */
1002 gen_set_label(label2);
1003 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
1004 tcg_gen_movi_i32(REG(B11_8), 0);
1005 tcg_gen_br(label4);
1006 gen_set_label(label3);
1007 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
1008 gen_set_label(label4);
1010 return;
1011 case 0x400d: /* shld Rm,Rn */
1013 int label1 = gen_new_label();
1014 int label2 = gen_new_label();
1015 int label3 = gen_new_label();
1016 TCGv shift;
1017 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
1018 /* Rm positive, shift to the left */
1019 shift = tcg_temp_new();
1020 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1021 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
1022 tcg_temp_free(shift);
1023 tcg_gen_br(label3);
1024 /* Rm negative, shift to the right */
1025 gen_set_label(label1);
1026 shift = tcg_temp_new();
1027 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1028 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1029 tcg_gen_not_i32(shift, REG(B7_4));
1030 tcg_gen_andi_i32(shift, shift, 0x1f);
1031 tcg_gen_addi_i32(shift, shift, 1);
1032 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1033 tcg_temp_free(shift);
1034 tcg_gen_br(label3);
1035 /* Rm = -32 */
1036 gen_set_label(label2);
1037 tcg_gen_movi_i32(REG(B11_8), 0);
1038 gen_set_label(label3);
1040 return;
1041 case 0x3008: /* sub Rm,Rn */
1042 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1043 return;
1044 case 0x300a: /* subc Rm,Rn */
1045 gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8));
1046 return;
1047 case 0x300b: /* subv Rm,Rn */
1048 gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8));
1049 return;
1050 case 0x2008: /* tst Rm,Rn */
1052 TCGv val = tcg_temp_new();
1053 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1054 gen_cmp_imm(TCG_COND_EQ, val, 0);
1055 tcg_temp_free(val);
1057 return;
1058 case 0x200a: /* xor Rm,Rn */
1059 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1060 return;
1061 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1062 CHECK_FPU_ENABLED
1063 if (ctx->fpscr & FPSCR_SZ) {
1064 TCGv_i64 fp = tcg_temp_new_i64();
1065 gen_load_fpr64(fp, XREG(B7_4));
1066 gen_store_fpr64(fp, XREG(B11_8));
1067 tcg_temp_free_i64(fp);
1068 } else {
1069 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1071 return;
1072 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1073 CHECK_FPU_ENABLED
1074 if (ctx->fpscr & FPSCR_SZ) {
1075 TCGv addr_hi = tcg_temp_new();
1076 int fr = XREG(B7_4);
1077 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1078 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1079 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1080 tcg_temp_free(addr_hi);
1081 } else {
1082 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1084 return;
1085 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1086 CHECK_FPU_ENABLED
1087 if (ctx->fpscr & FPSCR_SZ) {
1088 TCGv addr_hi = tcg_temp_new();
1089 int fr = XREG(B11_8);
1090 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1091 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1092 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1093 tcg_temp_free(addr_hi);
1094 } else {
1095 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1097 return;
1098 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1099 CHECK_FPU_ENABLED
1100 if (ctx->fpscr & FPSCR_SZ) {
1101 TCGv addr_hi = tcg_temp_new();
1102 int fr = XREG(B11_8);
1103 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1104 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1105 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1106 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1107 tcg_temp_free(addr_hi);
1108 } else {
1109 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1110 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1112 return;
1113 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1114 CHECK_FPU_ENABLED
1115 if (ctx->fpscr & FPSCR_SZ) {
1116 TCGv addr = tcg_temp_new_i32();
1117 int fr = XREG(B7_4);
1118 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1119 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1120 tcg_gen_subi_i32(addr, addr, 4);
1121 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1122 tcg_gen_mov_i32(REG(B11_8), addr);
1123 tcg_temp_free(addr);
1124 } else {
1125 TCGv addr;
1126 addr = tcg_temp_new_i32();
1127 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1128 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1129 tcg_gen_mov_i32(REG(B11_8), addr);
1130 tcg_temp_free(addr);
1132 return;
1133 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1134 CHECK_FPU_ENABLED
1136 TCGv addr = tcg_temp_new_i32();
1137 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1138 if (ctx->fpscr & FPSCR_SZ) {
1139 int fr = XREG(B11_8);
1140 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1141 tcg_gen_addi_i32(addr, addr, 4);
1142 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1143 } else {
1144 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1146 tcg_temp_free(addr);
1148 return;
1149 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1150 CHECK_FPU_ENABLED
1152 TCGv addr = tcg_temp_new();
1153 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1154 if (ctx->fpscr & FPSCR_SZ) {
1155 int fr = XREG(B7_4);
1156 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1157 tcg_gen_addi_i32(addr, addr, 4);
1158 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1159 } else {
1160 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1162 tcg_temp_free(addr);
1164 return;
1165 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1166 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1167 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1168 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1169 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1170 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1172 CHECK_FPU_ENABLED
1173 if (ctx->fpscr & FPSCR_PR) {
1174 TCGv_i64 fp0, fp1;
1176 if (ctx->opcode & 0x0110)
1177 break; /* illegal instruction */
1178 fp0 = tcg_temp_new_i64();
1179 fp1 = tcg_temp_new_i64();
1180 gen_load_fpr64(fp0, DREG(B11_8));
1181 gen_load_fpr64(fp1, DREG(B7_4));
1182 switch (ctx->opcode & 0xf00f) {
1183 case 0xf000: /* fadd Rm,Rn */
1184 gen_helper_fadd_DT(fp0, fp0, fp1);
1185 break;
1186 case 0xf001: /* fsub Rm,Rn */
1187 gen_helper_fsub_DT(fp0, fp0, fp1);
1188 break;
1189 case 0xf002: /* fmul Rm,Rn */
1190 gen_helper_fmul_DT(fp0, fp0, fp1);
1191 break;
1192 case 0xf003: /* fdiv Rm,Rn */
1193 gen_helper_fdiv_DT(fp0, fp0, fp1);
1194 break;
1195 case 0xf004: /* fcmp/eq Rm,Rn */
1196 gen_helper_fcmp_eq_DT(fp0, fp1);
1197 return;
1198 case 0xf005: /* fcmp/gt Rm,Rn */
1199 gen_helper_fcmp_gt_DT(fp0, fp1);
1200 return;
1202 gen_store_fpr64(fp0, DREG(B11_8));
1203 tcg_temp_free_i64(fp0);
1204 tcg_temp_free_i64(fp1);
1205 } else {
1206 switch (ctx->opcode & 0xf00f) {
1207 case 0xf000: /* fadd Rm,Rn */
1208 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1209 break;
1210 case 0xf001: /* fsub Rm,Rn */
1211 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1212 break;
1213 case 0xf002: /* fmul Rm,Rn */
1214 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1215 break;
1216 case 0xf003: /* fdiv Rm,Rn */
1217 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1218 break;
1219 case 0xf004: /* fcmp/eq Rm,Rn */
1220 gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1221 return;
1222 case 0xf005: /* fcmp/gt Rm,Rn */
1223 gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1224 return;
1228 return;
1229 case 0xf00e: /* fmac FR0,RM,Rn */
1231 CHECK_FPU_ENABLED
1232 if (ctx->fpscr & FPSCR_PR) {
1233 break; /* illegal instruction */
1234 } else {
1235 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)],
1236 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]);
1237 return;
1242 switch (ctx->opcode & 0xff00) {
1243 case 0xc900: /* and #imm,R0 */
1244 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1245 return;
1246 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1248 TCGv addr, val;
1249 addr = tcg_temp_new();
1250 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1251 val = tcg_temp_new();
1252 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1253 tcg_gen_andi_i32(val, val, B7_0);
1254 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1255 tcg_temp_free(val);
1256 tcg_temp_free(addr);
1258 return;
1259 case 0x8b00: /* bf label */
1260 CHECK_NOT_DELAY_SLOT
1261 gen_conditional_jump(ctx, ctx->pc + 2,
1262 ctx->pc + 4 + B7_0s * 2);
1263 ctx->bstate = BS_BRANCH;
1264 return;
1265 case 0x8f00: /* bf/s label */
1266 CHECK_NOT_DELAY_SLOT
1267 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1268 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1269 return;
1270 case 0x8900: /* bt label */
1271 CHECK_NOT_DELAY_SLOT
1272 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1273 ctx->pc + 2);
1274 ctx->bstate = BS_BRANCH;
1275 return;
1276 case 0x8d00: /* bt/s label */
1277 CHECK_NOT_DELAY_SLOT
1278 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1279 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1280 return;
1281 case 0x8800: /* cmp/eq #imm,R0 */
1282 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1283 return;
1284 case 0xc400: /* mov.b @(disp,GBR),R0 */
1286 TCGv addr = tcg_temp_new();
1287 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1288 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1289 tcg_temp_free(addr);
1291 return;
1292 case 0xc500: /* mov.w @(disp,GBR),R0 */
1294 TCGv addr = tcg_temp_new();
1295 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1296 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1297 tcg_temp_free(addr);
1299 return;
1300 case 0xc600: /* mov.l @(disp,GBR),R0 */
1302 TCGv addr = tcg_temp_new();
1303 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1304 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1305 tcg_temp_free(addr);
1307 return;
1308 case 0xc000: /* mov.b R0,@(disp,GBR) */
1310 TCGv addr = tcg_temp_new();
1311 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1312 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1313 tcg_temp_free(addr);
1315 return;
1316 case 0xc100: /* mov.w R0,@(disp,GBR) */
1318 TCGv addr = tcg_temp_new();
1319 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1320 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1321 tcg_temp_free(addr);
1323 return;
1324 case 0xc200: /* mov.l R0,@(disp,GBR) */
1326 TCGv addr = tcg_temp_new();
1327 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1328 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1329 tcg_temp_free(addr);
1331 return;
1332 case 0x8000: /* mov.b R0,@(disp,Rn) */
1334 TCGv addr = tcg_temp_new();
1335 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1336 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1337 tcg_temp_free(addr);
1339 return;
1340 case 0x8100: /* mov.w R0,@(disp,Rn) */
1342 TCGv addr = tcg_temp_new();
1343 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1344 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1345 tcg_temp_free(addr);
1347 return;
1348 case 0x8400: /* mov.b @(disp,Rn),R0 */
1350 TCGv addr = tcg_temp_new();
1351 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1352 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1353 tcg_temp_free(addr);
1355 return;
1356 case 0x8500: /* mov.w @(disp,Rn),R0 */
1358 TCGv addr = tcg_temp_new();
1359 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1360 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1361 tcg_temp_free(addr);
1363 return;
1364 case 0xc700: /* mova @(disp,PC),R0 */
1365 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1366 return;
1367 case 0xcb00: /* or #imm,R0 */
1368 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1369 return;
1370 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1372 TCGv addr, val;
1373 addr = tcg_temp_new();
1374 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1375 val = tcg_temp_new();
1376 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1377 tcg_gen_ori_i32(val, val, B7_0);
1378 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1379 tcg_temp_free(val);
1380 tcg_temp_free(addr);
1382 return;
1383 case 0xc300: /* trapa #imm */
1385 TCGv imm;
1386 CHECK_NOT_DELAY_SLOT
1387 imm = tcg_const_i32(B7_0);
1388 gen_helper_trapa(imm);
1389 tcg_temp_free(imm);
1390 ctx->bstate = BS_BRANCH;
1392 return;
1393 case 0xc800: /* tst #imm,R0 */
1395 TCGv val = tcg_temp_new();
1396 tcg_gen_andi_i32(val, REG(0), B7_0);
1397 gen_cmp_imm(TCG_COND_EQ, val, 0);
1398 tcg_temp_free(val);
1400 return;
1401 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1403 TCGv val = tcg_temp_new();
1404 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1405 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1406 tcg_gen_andi_i32(val, val, B7_0);
1407 gen_cmp_imm(TCG_COND_EQ, val, 0);
1408 tcg_temp_free(val);
1410 return;
1411 case 0xca00: /* xor #imm,R0 */
1412 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1413 return;
1414 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1416 TCGv addr, val;
1417 addr = tcg_temp_new();
1418 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1419 val = tcg_temp_new();
1420 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1421 tcg_gen_xori_i32(val, val, B7_0);
1422 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1423 tcg_temp_free(val);
1424 tcg_temp_free(addr);
1426 return;
1429 switch (ctx->opcode & 0xf08f) {
1430 case 0x408e: /* ldc Rm,Rn_BANK */
1431 CHECK_PRIVILEGED
1432 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1433 return;
1434 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1435 CHECK_PRIVILEGED
1436 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1437 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1438 return;
1439 case 0x0082: /* stc Rm_BANK,Rn */
1440 CHECK_PRIVILEGED
1441 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1442 return;
1443 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1444 CHECK_PRIVILEGED
1446 TCGv addr = tcg_temp_new();
1447 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1448 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1449 tcg_gen_mov_i32(REG(B11_8), addr);
1450 tcg_temp_free(addr);
1452 return;
1455 switch (ctx->opcode & 0xf0ff) {
1456 case 0x0023: /* braf Rn */
1457 CHECK_NOT_DELAY_SLOT
1458 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1459 ctx->flags |= DELAY_SLOT;
1460 ctx->delayed_pc = (uint32_t) - 1;
1461 return;
1462 case 0x0003: /* bsrf Rn */
1463 CHECK_NOT_DELAY_SLOT
1464 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1465 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1466 ctx->flags |= DELAY_SLOT;
1467 ctx->delayed_pc = (uint32_t) - 1;
1468 return;
1469 case 0x4015: /* cmp/pl Rn */
1470 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1471 return;
1472 case 0x4011: /* cmp/pz Rn */
1473 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1474 return;
1475 case 0x4010: /* dt Rn */
1476 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1477 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1478 return;
1479 case 0x402b: /* jmp @Rn */
1480 CHECK_NOT_DELAY_SLOT
1481 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1482 ctx->flags |= DELAY_SLOT;
1483 ctx->delayed_pc = (uint32_t) - 1;
1484 return;
1485 case 0x400b: /* jsr @Rn */
1486 CHECK_NOT_DELAY_SLOT
1487 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1488 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1489 ctx->flags |= DELAY_SLOT;
1490 ctx->delayed_pc = (uint32_t) - 1;
1491 return;
1492 case 0x400e: /* ldc Rm,SR */
1493 CHECK_PRIVILEGED
1494 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1495 ctx->bstate = BS_STOP;
1496 return;
1497 case 0x4007: /* ldc.l @Rm+,SR */
1498 CHECK_PRIVILEGED
1500 TCGv val = tcg_temp_new();
1501 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1502 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1503 tcg_temp_free(val);
1504 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1505 ctx->bstate = BS_STOP;
1507 return;
1508 case 0x0002: /* stc SR,Rn */
1509 CHECK_PRIVILEGED
1510 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1511 return;
1512 case 0x4003: /* stc SR,@-Rn */
1513 CHECK_PRIVILEGED
1515 TCGv addr = tcg_temp_new();
1516 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1517 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1518 tcg_gen_mov_i32(REG(B11_8), addr);
1519 tcg_temp_free(addr);
1521 return;
1522 #define LD(reg,ldnum,ldpnum,prechk) \
1523 case ldnum: \
1524 prechk \
1525 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1526 return; \
1527 case ldpnum: \
1528 prechk \
1529 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1530 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1531 return;
1532 #define ST(reg,stnum,stpnum,prechk) \
1533 case stnum: \
1534 prechk \
1535 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1536 return; \
1537 case stpnum: \
1538 prechk \
1540 TCGv addr = tcg_temp_new(); \
1541 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1542 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1543 tcg_gen_mov_i32(REG(B11_8), addr); \
1544 tcg_temp_free(addr); \
1546 return;
1547 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1548 LD(reg,ldnum,ldpnum,prechk) \
1549 ST(reg,stnum,stpnum,prechk)
1550 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1551 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1552 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1553 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1554 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1555 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1556 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1557 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1558 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1559 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1560 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1561 case 0x406a: /* lds Rm,FPSCR */
1562 CHECK_FPU_ENABLED
1563 gen_helper_ld_fpscr(REG(B11_8));
1564 ctx->bstate = BS_STOP;
1565 return;
1566 case 0x4066: /* lds.l @Rm+,FPSCR */
1567 CHECK_FPU_ENABLED
1569 TCGv addr = tcg_temp_new();
1570 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1571 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1572 gen_helper_ld_fpscr(addr);
1573 tcg_temp_free(addr);
1574 ctx->bstate = BS_STOP;
1576 return;
1577 case 0x006a: /* sts FPSCR,Rn */
1578 CHECK_FPU_ENABLED
1579 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1580 return;
1581 case 0x4062: /* sts FPSCR,@-Rn */
1582 CHECK_FPU_ENABLED
1584 TCGv addr, val;
1585 val = tcg_temp_new();
1586 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1587 addr = tcg_temp_new();
1588 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1589 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1590 tcg_gen_mov_i32(REG(B11_8), addr);
1591 tcg_temp_free(addr);
1592 tcg_temp_free(val);
1594 return;
1595 case 0x00c3: /* movca.l R0,@Rm */
1597 TCGv val = tcg_temp_new();
1598 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1599 gen_helper_movcal (REG(B11_8), val);
1600 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1602 ctx->has_movcal = 1;
1603 return;
1604 case 0x40a9:
1605 /* MOVUA.L @Rm,R0 (Rm) -> R0
1606 Load non-boundary-aligned data */
1607 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1608 return;
1609 case 0x40e9:
1610 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1611 Load non-boundary-aligned data */
1612 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1613 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1614 return;
1615 case 0x0029: /* movt Rn */
1616 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1617 return;
1618 case 0x0073:
1619 /* MOVCO.L
1620 LDST -> T
1621 If (T == 1) R0 -> (Rn)
1622 0 -> LDST
1624 if (ctx->features & SH_FEATURE_SH4A) {
1625 int label = gen_new_label();
1626 gen_clr_t();
1627 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1628 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1629 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1630 gen_set_label(label);
1631 tcg_gen_movi_i32(cpu_ldst, 0);
1632 return;
1633 } else
1634 break;
1635 case 0x0063:
1636 /* MOVLI.L @Rm,R0
1637 1 -> LDST
1638 (Rm) -> R0
1639 When interrupt/exception
1640 occurred 0 -> LDST
1642 if (ctx->features & SH_FEATURE_SH4A) {
1643 tcg_gen_movi_i32(cpu_ldst, 0);
1644 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1645 tcg_gen_movi_i32(cpu_ldst, 1);
1646 return;
1647 } else
1648 break;
1649 case 0x0093: /* ocbi @Rn */
1651 gen_helper_ocbi (REG(B11_8));
1653 return;
1654 case 0x00a3: /* ocbp @Rn */
1655 case 0x00b3: /* ocbwb @Rn */
1656 /* These instructions are supposed to do nothing in case of
1657 a cache miss. Given that we only partially emulate caches
1658 it is safe to simply ignore them. */
1659 return;
1660 case 0x0083: /* pref @Rn */
1661 return;
1662 case 0x00d3: /* prefi @Rn */
1663 if (ctx->features & SH_FEATURE_SH4A)
1664 return;
1665 else
1666 break;
1667 case 0x00e3: /* icbi @Rn */
1668 if (ctx->features & SH_FEATURE_SH4A)
1669 return;
1670 else
1671 break;
1672 case 0x00ab: /* synco */
1673 if (ctx->features & SH_FEATURE_SH4A)
1674 return;
1675 else
1676 break;
1677 case 0x4024: /* rotcl Rn */
1679 TCGv tmp = tcg_temp_new();
1680 tcg_gen_mov_i32(tmp, cpu_sr);
1681 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1682 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1683 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1684 tcg_temp_free(tmp);
1686 return;
1687 case 0x4025: /* rotcr Rn */
1689 TCGv tmp = tcg_temp_new();
1690 tcg_gen_mov_i32(tmp, cpu_sr);
1691 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1692 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1693 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1694 tcg_temp_free(tmp);
1696 return;
1697 case 0x4004: /* rotl Rn */
1698 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1699 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1700 return;
1701 case 0x4005: /* rotr Rn */
1702 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1703 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1704 return;
1705 case 0x4000: /* shll Rn */
1706 case 0x4020: /* shal Rn */
1707 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1708 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1709 return;
1710 case 0x4021: /* shar Rn */
1711 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1712 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1713 return;
1714 case 0x4001: /* shlr Rn */
1715 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1716 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1717 return;
1718 case 0x4008: /* shll2 Rn */
1719 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1720 return;
1721 case 0x4018: /* shll8 Rn */
1722 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1723 return;
1724 case 0x4028: /* shll16 Rn */
1725 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1726 return;
1727 case 0x4009: /* shlr2 Rn */
1728 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1729 return;
1730 case 0x4019: /* shlr8 Rn */
1731 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1732 return;
1733 case 0x4029: /* shlr16 Rn */
1734 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1735 return;
1736 case 0x401b: /* tas.b @Rn */
1738 TCGv addr, val;
1739 addr = tcg_temp_local_new();
1740 tcg_gen_mov_i32(addr, REG(B11_8));
1741 val = tcg_temp_local_new();
1742 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1743 gen_cmp_imm(TCG_COND_EQ, val, 0);
1744 tcg_gen_ori_i32(val, val, 0x80);
1745 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1746 tcg_temp_free(val);
1747 tcg_temp_free(addr);
1749 return;
1750 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1751 CHECK_FPU_ENABLED
1752 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1753 return;
1754 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1755 CHECK_FPU_ENABLED
1756 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1757 return;
1758 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1759 CHECK_FPU_ENABLED
1760 if (ctx->fpscr & FPSCR_PR) {
1761 TCGv_i64 fp;
1762 if (ctx->opcode & 0x0100)
1763 break; /* illegal instruction */
1764 fp = tcg_temp_new_i64();
1765 gen_helper_float_DT(fp, cpu_fpul);
1766 gen_store_fpr64(fp, DREG(B11_8));
1767 tcg_temp_free_i64(fp);
1769 else {
1770 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul);
1772 return;
1773 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1774 CHECK_FPU_ENABLED
1775 if (ctx->fpscr & FPSCR_PR) {
1776 TCGv_i64 fp;
1777 if (ctx->opcode & 0x0100)
1778 break; /* illegal instruction */
1779 fp = tcg_temp_new_i64();
1780 gen_load_fpr64(fp, DREG(B11_8));
1781 gen_helper_ftrc_DT(cpu_fpul, fp);
1782 tcg_temp_free_i64(fp);
1784 else {
1785 gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1787 return;
1788 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1789 CHECK_FPU_ENABLED
1791 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1793 return;
1794 case 0xf05d: /* fabs FRn/DRn */
1795 CHECK_FPU_ENABLED
1796 if (ctx->fpscr & FPSCR_PR) {
1797 if (ctx->opcode & 0x0100)
1798 break; /* illegal instruction */
1799 TCGv_i64 fp = tcg_temp_new_i64();
1800 gen_load_fpr64(fp, DREG(B11_8));
1801 gen_helper_fabs_DT(fp, fp);
1802 gen_store_fpr64(fp, DREG(B11_8));
1803 tcg_temp_free_i64(fp);
1804 } else {
1805 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1807 return;
1808 case 0xf06d: /* fsqrt FRn */
1809 CHECK_FPU_ENABLED
1810 if (ctx->fpscr & FPSCR_PR) {
1811 if (ctx->opcode & 0x0100)
1812 break; /* illegal instruction */
1813 TCGv_i64 fp = tcg_temp_new_i64();
1814 gen_load_fpr64(fp, DREG(B11_8));
1815 gen_helper_fsqrt_DT(fp, fp);
1816 gen_store_fpr64(fp, DREG(B11_8));
1817 tcg_temp_free_i64(fp);
1818 } else {
1819 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1821 return;
1822 case 0xf07d: /* fsrra FRn */
1823 CHECK_FPU_ENABLED
1824 break;
1825 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1826 CHECK_FPU_ENABLED
1827 if (!(ctx->fpscr & FPSCR_PR)) {
1828 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1830 return;
1831 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1832 CHECK_FPU_ENABLED
1833 if (!(ctx->fpscr & FPSCR_PR)) {
1834 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1836 return;
1837 case 0xf0ad: /* fcnvsd FPUL,DRn */
1838 CHECK_FPU_ENABLED
1840 TCGv_i64 fp = tcg_temp_new_i64();
1841 gen_helper_fcnvsd_FT_DT(fp, cpu_fpul);
1842 gen_store_fpr64(fp, DREG(B11_8));
1843 tcg_temp_free_i64(fp);
1845 return;
1846 case 0xf0bd: /* fcnvds DRn,FPUL */
1847 CHECK_FPU_ENABLED
1849 TCGv_i64 fp = tcg_temp_new_i64();
1850 gen_load_fpr64(fp, DREG(B11_8));
1851 gen_helper_fcnvds_DT_FT(cpu_fpul, fp);
1852 tcg_temp_free_i64(fp);
1854 return;
1855 case 0xf0ed: /* fipr FVm,FVn */
1856 CHECK_FPU_ENABLED
1857 if ((ctx->fpscr & FPSCR_PR) == 0) {
1858 TCGv m, n;
1859 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1860 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1861 gen_helper_fipr(m, n);
1862 tcg_temp_free(m);
1863 tcg_temp_free(n);
1864 return;
1866 break;
1867 case 0xf0fd: /* ftrv XMTRX,FVn */
1868 CHECK_FPU_ENABLED
1869 if ((ctx->opcode & 0x0300) == 0x0100 &&
1870 (ctx->fpscr & FPSCR_PR) == 0) {
1871 TCGv n;
1872 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1873 gen_helper_ftrv(n);
1874 tcg_temp_free(n);
1875 return;
1877 break;
1879 #if 0
1880 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1881 ctx->opcode, ctx->pc);
1882 fflush(stderr);
1883 #endif
1884 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1885 gen_helper_raise_slot_illegal_instruction();
1886 } else {
1887 gen_helper_raise_illegal_instruction();
1889 ctx->bstate = BS_EXCP;
/* Translate a single guest instruction and run the delay-slot state
   machine around it.  old_flags snapshots ctx->flags before decoding so
   that, after the instruction is emitted, we can tell whether THIS
   instruction was executing inside a delay slot armed by its
   predecessor, and if so emit the pending (conditional) branch. */
static void decode_opc(DisasContext * ctx)
    /* Flags as they were before this insn: non-zero DELAY_SLOT bits here
       mean we are currently translating the delay-slot instruction. */
    uint32_t old_flags = ctx->flags;
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
        tcg_gen_debug_insn_start(ctx->pc);
    _decode_opc(ctx);
    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            gen_store_flags(0);
        } else {
	    /* go out of the delay slot */
	    uint32_t new_flags = ctx->flags;
	    new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
	    gen_store_flags(new_flags);
        /* Delay slot executed: this TB ends at the delayed branch. */
        ctx->flags = 0;
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
	    gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);
	/* go into a delay slot */
	if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
	    gen_store_flags(ctx->flags);
/* Translate a block of guest SH4 code starting at tb->pc into TCG ops.
   When search_pc is non-zero, additionally record per-op guest state
   (gen_opc_pc / gen_opc_hflags / gen_opc_instr_start / gen_opc_icount)
   so a host PC inside the generated code can later be mapped back to
   guest PC and flags (see restore_state_to_opc). */
static inline void
gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
                               int search_pc)
    DisasContext ctx;
    target_ulong pc_start;
    static uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int i, ii;
    int num_insns;
    int max_insns;
    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    ctx.sr = env->sr;
    ctx.fpscr = env->fpscr;
    /* SR.MD clear means user mode: select the user MMU index (1),
       otherwise the privileged one (0). */
    ctx.memidx = (env->sr & SR_MD) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = env->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);
    ii = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_icount_start();
    /* Translate until a branch/exception ends the TB or the opcode
       buffer fills up. */
    while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (ctx.pc == bp->pc) {
		    /* We have hit a breakpoint - make sure PC is up-to-date */
		    tcg_gen_movi_i32(cpu_pc, ctx.pc);
		    gen_helper_debug();
		    ctx.bstate = BS_EXCP;
		    break;
        if (search_pc) {
            /* Pad unassigned slots, then record guest state for the
               ops about to be generated for this instruction. */
            i = gen_opc_ptr - gen_opc_buf;
            if (ii < i) {
                ii++;
                while (ii < i)
                    gen_opc_instr_start[ii++] = 0;
            gen_opc_pc[ii] = ctx.pc;
            gen_opc_hflags[ii] = ctx.flags;
            gen_opc_instr_start[ii] = 1;
            gen_opc_icount[ii] = num_insns;
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
#if 0
	fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
	fflush(stderr);
#endif
        /* SH4 instructions are all 16 bits wide. */
        ctx.opcode = lduw_code(ctx.pc);
	decode_opc(&ctx);
	num_insns++;
        ctx.pc += 2;
        /* Stop at a page boundary so the TB never spans two pages. */
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (env->singlestep_enabled)
            break;
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (env->singlestep_enabled) {
        tcg_gen_movi_i32(cpu_pc, ctx.pc);
        gen_helper_debug();
    } else {
	switch (ctx.bstate) {
        case BS_STOP:
            /* gen_op_interrupt_restart(); */
            /* fall through */
        case BS_NONE:
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* gen_op_interrupt_restart(); */
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            /* Branch case already emitted its own TB exit. */
            break;
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the remaining instr_start slots. */
        i = gen_opc_ptr - gen_opc_buf;
        ii++;
        while (ii <= i)
            gen_opc_instr_start[ii++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
#ifdef DEBUG_DISAS
#ifdef SH4_DEBUG_DISAS
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
#endif
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
	qemu_log("IN:\n");	/* , lookup_symbol(pc_start)); */
	log_target_disas(pc_start, ctx.pc - pc_start, 0);
	qemu_log("\n");
#endif
/* Normal translation entry point: no search_pc bookkeeping. */
void gen_intermediate_code(CPUState * env, struct TranslationBlock *tb)
    gen_intermediate_code_internal(env, tb, 0);
/* Re-translation with PC search info recorded, used to recover guest
   state after an exception inside a TB. */
void gen_intermediate_code_pc(CPUState * env, struct TranslationBlock *tb)
    gen_intermediate_code_internal(env, tb, 1);
/* Restore guest pc and flags from the gen_opc_* arrays filled in by a
   search_pc translation; pc_pos indexes the faulting op. */
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
    env->pc = gen_opc_pc[pc_pos];
    env->flags = gen_opc_hflags[pc_pos];