Update version for v2.2.0-rc5 release
[qemu/qmp-unstable.git] / target-sh4 / translate.c
blob3088edc6a65000cabdb9028c280dd1c1095afed1
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #define DEBUG_DISAS
21 //#define SH4_SINGLE_STEP
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
/* Per-TB translation state, filled in by gen_intermediate_code_internal. */
typedef struct DisasContext {
    struct TranslationBlock *tb;  /* TB being translated */
    target_ulong pc;              /* guest PC of the insn being translated */
    uint16_t opcode;              /* raw 16-bit SH4 instruction word */
    uint32_t flags;               /* translation-time copy of env->flags
                                     (SR mode bits, FPSCR bits, delay-slot state) */
    int bstate;                   /* BS_* exit state of the TB */
    int memidx;                   /* MMU index for qemu_ld/st (user vs kernel) */
    uint32_t delayed_pc;          /* static branch target, or (uint32_t)-1 when
                                     the target is only known at run time */
    int singlestep_enabled;       /* emit debug exception after each insn */
    uint32_t features;            /* CPU model feature bits */
    int has_movcal;               /* a movca.l backup may be pending in this TB
                                     (see the comment in _decode_opc) */
} DisasContext;
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
/* User mode when SR.MD is clear in the translation-time flags;
   privileged mode otherwise. */
#define IS_USER(ctx) (!(ctx->flags & SR_MD))
#endif
/* Values for DisasContext.bstate: why/how the TB ends. */
enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                  * exception condition */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_gregs[24];  /* R0-R15, plus R0-R7 of the inactive bank */
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];  /* FPR0-15 of both FPSCR.FR banks */

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;

/* NOTE(review): presumably records env flags per generated op for state
   restore on a fault, parallel to gen_opc_pc — confirm against the
   restore_state code, which is outside this chunk. */
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "exec/gen-icount.h"
77 void sh4_translate_init(void)
79 int i;
80 static int done_init = 0;
81 static const char * const gregnames[24] = {
82 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
83 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
84 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
85 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
86 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
88 static const char * const fregnames[32] = {
89 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
90 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
91 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
92 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
93 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
94 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
95 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
96 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
99 if (done_init)
100 return;
102 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
104 for (i = 0; i < 24; i++)
105 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
106 offsetof(CPUSH4State, gregs[i]),
107 gregnames[i]);
109 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
110 offsetof(CPUSH4State, pc), "PC");
111 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
112 offsetof(CPUSH4State, sr), "SR");
113 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUSH4State, ssr), "SSR");
115 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
116 offsetof(CPUSH4State, spc), "SPC");
117 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUSH4State, gbr), "GBR");
119 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUSH4State, vbr), "VBR");
121 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUSH4State, sgr), "SGR");
123 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUSH4State, dbr), "DBR");
125 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUSH4State, mach), "MACH");
127 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUSH4State, macl), "MACL");
129 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
130 offsetof(CPUSH4State, pr), "PR");
131 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
132 offsetof(CPUSH4State, fpscr), "FPSCR");
133 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
134 offsetof(CPUSH4State, fpul), "FPUL");
136 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
137 offsetof(CPUSH4State, flags), "_flags_");
138 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
139 offsetof(CPUSH4State, delayed_pc),
140 "_delayed_pc_");
141 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
142 offsetof(CPUSH4State, ldst), "_ldst_");
144 for (i = 0; i < 32; i++)
145 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
146 offsetof(CPUSH4State, fregs[i]),
147 fregnames[i]);
149 done_init = 1;
152 void superh_cpu_dump_state(CPUState *cs, FILE *f,
153 fprintf_function cpu_fprintf, int flags)
155 SuperHCPU *cpu = SUPERH_CPU(cs);
156 CPUSH4State *env = &cpu->env;
157 int i;
158 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
159 env->pc, env->sr, env->pr, env->fpscr);
160 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
161 env->spc, env->ssr, env->gbr, env->vbr);
162 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
163 env->sgr, env->dbr, env->delayed_pc, env->fpul);
164 for (i = 0; i < 24; i += 4) {
165 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
166 i, env->gregs[i], i + 1, env->gregs[i + 1],
167 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
169 if (env->flags & DELAY_SLOT) {
170 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
171 env->delayed_pc);
172 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
173 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
174 env->delayed_pc);
178 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
180 TranslationBlock *tb;
181 tb = ctx->tb;
183 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
184 !ctx->singlestep_enabled) {
185 /* Use a direct jump if in same page and singlestep not enabled */
186 tcg_gen_goto_tb(n);
187 tcg_gen_movi_i32(cpu_pc, dest);
188 tcg_gen_exit_tb((uintptr_t)tb + n);
189 } else {
190 tcg_gen_movi_i32(cpu_pc, dest);
191 if (ctx->singlestep_enabled)
192 gen_helper_debug(cpu_env);
193 tcg_gen_exit_tb(0);
197 static void gen_jump(DisasContext * ctx)
199 if (ctx->delayed_pc == (uint32_t) - 1) {
200 /* Target is not statically known, it comes necessarily from a
201 delayed jump as immediate jump are conditinal jumps */
202 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
203 if (ctx->singlestep_enabled)
204 gen_helper_debug(cpu_env);
205 tcg_gen_exit_tb(0);
206 } else {
207 gen_goto_tb(ctx, 0, ctx->delayed_pc);
211 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
213 TCGv sr;
214 int label = gen_new_label();
215 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
216 sr = tcg_temp_new();
217 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
218 tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
219 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
220 gen_set_label(label);
223 /* Immediate conditional jump (bt or bf) */
224 static void gen_conditional_jump(DisasContext * ctx,
225 target_ulong ift, target_ulong ifnott)
227 int l1;
228 TCGv sr;
230 l1 = gen_new_label();
231 sr = tcg_temp_new();
232 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
233 tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
234 gen_goto_tb(ctx, 0, ifnott);
235 gen_set_label(l1);
236 gen_goto_tb(ctx, 1, ift);
239 /* Delayed conditional jump (bt or bf) */
240 static void gen_delayed_conditional_jump(DisasContext * ctx)
242 int l1;
243 TCGv ds;
245 l1 = gen_new_label();
246 ds = tcg_temp_new();
247 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
248 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
249 gen_goto_tb(ctx, 1, ctx->pc + 2);
250 gen_set_label(l1);
251 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
252 gen_jump(ctx);
255 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
257 TCGv t;
259 t = tcg_temp_new();
260 tcg_gen_setcond_i32(cond, t, t1, t0);
261 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
262 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
264 tcg_temp_free(t);
267 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
269 TCGv t;
271 t = tcg_temp_new();
272 tcg_gen_setcondi_i32(cond, t, t0, imm);
273 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
274 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
276 tcg_temp_free(t);
279 static inline void gen_store_flags(uint32_t flags)
281 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
282 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
285 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
287 TCGv tmp = tcg_temp_new();
289 p0 &= 0x1f;
290 p1 &= 0x1f;
292 tcg_gen_andi_i32(tmp, t1, (1 << p1));
293 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
294 if (p0 < p1)
295 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
296 else if (p0 > p1)
297 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
298 tcg_gen_or_i32(t0, t0, tmp);
300 tcg_temp_free(tmp);
303 static inline void gen_load_fpr64(TCGv_i64 t, int reg)
305 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
308 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
310 TCGv_i32 tmp = tcg_temp_new_i32();
311 tcg_gen_trunc_i64_i32(tmp, t);
312 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
313 tcg_gen_shri_i64(t, t, 32);
314 tcg_gen_trunc_i64_i32(tmp, t);
315 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
316 tcg_temp_free_i32(tmp);
/* Instruction field extraction (ctx->opcode is the raw 16-bit word). */
#define B3_0 (ctx->opcode & 0xf)            /* bits 3..0 */
#define B6_4 ((ctx->opcode >> 4) & 0x7)     /* bits 6..4 */
#define B7_4 ((ctx->opcode >> 4) & 0xf)     /* bits 7..4 (usually Rm) */
#define B7_0 (ctx->opcode & 0xff)           /* 8-bit unsigned immediate */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff)) /* sign-extended imm8 */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))                    /* sign-extended 12-bit disp */
#define B11_8 ((ctx->opcode >> 8) & 0xf)    /* bits 11..8 (usually Rn) */
#define B15_12 ((ctx->opcode >> 12) & 0xf)  /* major opcode nibble */

/* Bank-aware access to R0-R7: REG() selects the bank implied by the current
   SR.MD/SR.RB, ALTREG() the opposite one; R8-R15 are unbanked. */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection honouring the FPSCR.FR bank swap; XREG applies the
   XD/pair interleaving first. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise a slot-illegal exception if this insn is being decoded inside a
   delay slot (branches and some others are forbidden there).
   (Restored the brace continuation lines stripped by the blob extraction.) */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
      gen_helper_raise_slot_illegal_instruction(cpu_env);     \
      ctx->bstate = BS_BRANCH;                                \
      return;                                                 \
  }

/* Raise illegal (or slot-illegal, inside a delay slot) when executing a
   privileged insn in user mode. */
#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_illegal_instruction(cpu_env);   \
      } else {                                                  \
          gen_helper_raise_illegal_instruction(cpu_env);        \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }

/* Raise FPU-disable (or the slot variant) when SR.FD is set. */
#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & SR_FD) {                                     \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(cpu_env);           \
      } else {                                                  \
          gen_helper_raise_fpu_disable(cpu_env);                \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }
373 static void _decode_opc(DisasContext * ctx)
375 /* This code tries to make movcal emulation sufficiently
376 accurate for Linux purposes. This instruction writes
377 memory, and prior to that, always allocates a cache line.
378 It is used in two contexts:
379 - in memcpy, where data is copied in blocks, the first write
380 of to a block uses movca.l for performance.
381 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
382 to flush the cache. Here, the data written by movcal.l is never
383 written to memory, and the data written is just bogus.
385 To simulate this, we simulate movcal.l, we store the value to memory,
386 but we also remember the previous content. If we see ocbi, we check
387 if movcal.l for that address was done previously. If so, the write should
388 not have hit the memory, so we restore the previous content.
389 When we see an instruction that is neither movca.l
390 nor ocbi, the previous content is discarded.
392 To optimize, we only try to flush stores when we're at the start of
393 TB, or if we already saw movca.l in this TB and did not flush stores
394 yet. */
395 if (ctx->has_movcal)
397 int opcode = ctx->opcode & 0xf0ff;
398 if (opcode != 0x0093 /* ocbi */
399 && opcode != 0x00c3 /* movca.l */)
401 gen_helper_discard_movcal_backup(cpu_env);
402 ctx->has_movcal = 0;
406 #if 0
407 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
408 #endif
410 switch (ctx->opcode) {
411 case 0x0019: /* div0u */
412 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
413 return;
414 case 0x000b: /* rts */
415 CHECK_NOT_DELAY_SLOT
416 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
417 ctx->flags |= DELAY_SLOT;
418 ctx->delayed_pc = (uint32_t) - 1;
419 return;
420 case 0x0028: /* clrmac */
421 tcg_gen_movi_i32(cpu_mach, 0);
422 tcg_gen_movi_i32(cpu_macl, 0);
423 return;
424 case 0x0048: /* clrs */
425 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
426 return;
427 case 0x0008: /* clrt */
428 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
429 return;
430 case 0x0038: /* ldtlb */
431 CHECK_PRIVILEGED
432 gen_helper_ldtlb(cpu_env);
433 return;
434 case 0x002b: /* rte */
435 CHECK_PRIVILEGED
436 CHECK_NOT_DELAY_SLOT
437 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
438 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
439 ctx->flags |= DELAY_SLOT;
440 ctx->delayed_pc = (uint32_t) - 1;
441 return;
442 case 0x0058: /* sets */
443 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
444 return;
445 case 0x0018: /* sett */
446 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
447 return;
448 case 0xfbfd: /* frchg */
449 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
450 ctx->bstate = BS_STOP;
451 return;
452 case 0xf3fd: /* fschg */
453 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
454 ctx->bstate = BS_STOP;
455 return;
456 case 0x0009: /* nop */
457 return;
458 case 0x001b: /* sleep */
459 CHECK_PRIVILEGED
460 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
461 gen_helper_sleep(cpu_env);
462 return;
465 switch (ctx->opcode & 0xf000) {
466 case 0x1000: /* mov.l Rm,@(disp,Rn) */
468 TCGv addr = tcg_temp_new();
469 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
470 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
471 tcg_temp_free(addr);
473 return;
474 case 0x5000: /* mov.l @(disp,Rm),Rn */
476 TCGv addr = tcg_temp_new();
477 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
478 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
479 tcg_temp_free(addr);
481 return;
482 case 0xe000: /* mov #imm,Rn */
483 tcg_gen_movi_i32(REG(B11_8), B7_0s);
484 return;
485 case 0x9000: /* mov.w @(disp,PC),Rn */
487 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
488 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
489 tcg_temp_free(addr);
491 return;
492 case 0xd000: /* mov.l @(disp,PC),Rn */
494 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
495 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
496 tcg_temp_free(addr);
498 return;
499 case 0x7000: /* add #imm,Rn */
500 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
501 return;
502 case 0xa000: /* bra disp */
503 CHECK_NOT_DELAY_SLOT
504 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
505 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
506 ctx->flags |= DELAY_SLOT;
507 return;
508 case 0xb000: /* bsr disp */
509 CHECK_NOT_DELAY_SLOT
510 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
511 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
512 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
513 ctx->flags |= DELAY_SLOT;
514 return;
517 switch (ctx->opcode & 0xf00f) {
518 case 0x6003: /* mov Rm,Rn */
519 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
520 return;
521 case 0x2000: /* mov.b Rm,@Rn */
522 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
523 return;
524 case 0x2001: /* mov.w Rm,@Rn */
525 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
526 return;
527 case 0x2002: /* mov.l Rm,@Rn */
528 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
529 return;
530 case 0x6000: /* mov.b @Rm,Rn */
531 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
532 return;
533 case 0x6001: /* mov.w @Rm,Rn */
534 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
535 return;
536 case 0x6002: /* mov.l @Rm,Rn */
537 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
538 return;
539 case 0x2004: /* mov.b Rm,@-Rn */
541 TCGv addr = tcg_temp_new();
542 tcg_gen_subi_i32(addr, REG(B11_8), 1);
543 /* might cause re-execution */
544 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
545 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
546 tcg_temp_free(addr);
548 return;
549 case 0x2005: /* mov.w Rm,@-Rn */
551 TCGv addr = tcg_temp_new();
552 tcg_gen_subi_i32(addr, REG(B11_8), 2);
553 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
554 tcg_gen_mov_i32(REG(B11_8), addr);
555 tcg_temp_free(addr);
557 return;
558 case 0x2006: /* mov.l Rm,@-Rn */
560 TCGv addr = tcg_temp_new();
561 tcg_gen_subi_i32(addr, REG(B11_8), 4);
562 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
563 tcg_gen_mov_i32(REG(B11_8), addr);
565 return;
566 case 0x6004: /* mov.b @Rm+,Rn */
567 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
568 if ( B11_8 != B7_4 )
569 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
570 return;
571 case 0x6005: /* mov.w @Rm+,Rn */
572 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
573 if ( B11_8 != B7_4 )
574 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
575 return;
576 case 0x6006: /* mov.l @Rm+,Rn */
577 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
578 if ( B11_8 != B7_4 )
579 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
580 return;
581 case 0x0004: /* mov.b Rm,@(R0,Rn) */
583 TCGv addr = tcg_temp_new();
584 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
585 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
586 tcg_temp_free(addr);
588 return;
589 case 0x0005: /* mov.w Rm,@(R0,Rn) */
591 TCGv addr = tcg_temp_new();
592 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
593 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
594 tcg_temp_free(addr);
596 return;
597 case 0x0006: /* mov.l Rm,@(R0,Rn) */
599 TCGv addr = tcg_temp_new();
600 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
601 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
602 tcg_temp_free(addr);
604 return;
605 case 0x000c: /* mov.b @(R0,Rm),Rn */
607 TCGv addr = tcg_temp_new();
608 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
609 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
610 tcg_temp_free(addr);
612 return;
613 case 0x000d: /* mov.w @(R0,Rm),Rn */
615 TCGv addr = tcg_temp_new();
616 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
617 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
618 tcg_temp_free(addr);
620 return;
621 case 0x000e: /* mov.l @(R0,Rm),Rn */
623 TCGv addr = tcg_temp_new();
624 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
625 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
626 tcg_temp_free(addr);
628 return;
629 case 0x6008: /* swap.b Rm,Rn */
631 TCGv high, low;
632 high = tcg_temp_new();
633 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
634 low = tcg_temp_new();
635 tcg_gen_ext16u_i32(low, REG(B7_4));
636 tcg_gen_bswap16_i32(low, low);
637 tcg_gen_or_i32(REG(B11_8), high, low);
638 tcg_temp_free(low);
639 tcg_temp_free(high);
641 return;
642 case 0x6009: /* swap.w Rm,Rn */
643 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
644 return;
645 case 0x200d: /* xtrct Rm,Rn */
647 TCGv high, low;
648 high = tcg_temp_new();
649 tcg_gen_shli_i32(high, REG(B7_4), 16);
650 low = tcg_temp_new();
651 tcg_gen_shri_i32(low, REG(B11_8), 16);
652 tcg_gen_or_i32(REG(B11_8), high, low);
653 tcg_temp_free(low);
654 tcg_temp_free(high);
656 return;
657 case 0x300c: /* add Rm,Rn */
658 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
659 return;
660 case 0x300e: /* addc Rm,Rn */
662 TCGv t0, t1, t2;
663 t0 = tcg_temp_new();
664 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
665 t1 = tcg_temp_new();
666 tcg_gen_add_i32(t1, REG(B7_4), REG(B11_8));
667 tcg_gen_add_i32(t0, t0, t1);
668 t2 = tcg_temp_new();
669 tcg_gen_setcond_i32(TCG_COND_GTU, t2, REG(B11_8), t1);
670 tcg_gen_setcond_i32(TCG_COND_GTU, t1, t1, t0);
671 tcg_gen_or_i32(t1, t1, t2);
672 tcg_temp_free(t2);
673 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
674 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
675 tcg_temp_free(t1);
676 tcg_gen_mov_i32(REG(B11_8), t0);
677 tcg_temp_free(t0);
679 return;
680 case 0x300f: /* addv Rm,Rn */
682 TCGv t0, t1, t2;
683 t0 = tcg_temp_new();
684 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
685 t1 = tcg_temp_new();
686 tcg_gen_xor_i32(t1, t0, REG(B11_8));
687 t2 = tcg_temp_new();
688 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
689 tcg_gen_andc_i32(t1, t1, t2);
690 tcg_temp_free(t2);
691 tcg_gen_shri_i32(t1, t1, 31);
692 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
693 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
694 tcg_temp_free(t1);
695 tcg_gen_mov_i32(REG(B7_4), t0);
696 tcg_temp_free(t0);
698 return;
699 case 0x2009: /* and Rm,Rn */
700 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
701 return;
702 case 0x3000: /* cmp/eq Rm,Rn */
703 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
704 return;
705 case 0x3003: /* cmp/ge Rm,Rn */
706 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
707 return;
708 case 0x3007: /* cmp/gt Rm,Rn */
709 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
710 return;
711 case 0x3006: /* cmp/hi Rm,Rn */
712 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
713 return;
714 case 0x3002: /* cmp/hs Rm,Rn */
715 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
716 return;
717 case 0x200c: /* cmp/str Rm,Rn */
719 TCGv cmp1 = tcg_temp_new();
720 TCGv cmp2 = tcg_temp_new();
721 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
722 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
723 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
724 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
725 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
726 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
727 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
728 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
729 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
730 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
731 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
732 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
733 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
734 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
735 tcg_temp_free(cmp2);
736 tcg_temp_free(cmp1);
738 return;
739 case 0x2007: /* div0s Rm,Rn */
741 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
742 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
743 TCGv val = tcg_temp_new();
744 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
745 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
746 tcg_temp_free(val);
748 return;
749 case 0x3004: /* div1 Rm,Rn */
750 gen_helper_div1(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
751 return;
752 case 0x300d: /* dmuls.l Rm,Rn */
753 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
754 return;
755 case 0x3005: /* dmulu.l Rm,Rn */
756 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
757 return;
758 case 0x600e: /* exts.b Rm,Rn */
759 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
760 return;
761 case 0x600f: /* exts.w Rm,Rn */
762 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
763 return;
764 case 0x600c: /* extu.b Rm,Rn */
765 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
766 return;
767 case 0x600d: /* extu.w Rm,Rn */
768 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
769 return;
770 case 0x000f: /* mac.l @Rm+,@Rn+ */
772 TCGv arg0, arg1;
773 arg0 = tcg_temp_new();
774 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
775 arg1 = tcg_temp_new();
776 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
777 gen_helper_macl(cpu_env, arg0, arg1);
778 tcg_temp_free(arg1);
779 tcg_temp_free(arg0);
780 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
781 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
783 return;
784 case 0x400f: /* mac.w @Rm+,@Rn+ */
786 TCGv arg0, arg1;
787 arg0 = tcg_temp_new();
788 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
789 arg1 = tcg_temp_new();
790 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
791 gen_helper_macw(cpu_env, arg0, arg1);
792 tcg_temp_free(arg1);
793 tcg_temp_free(arg0);
794 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
795 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
797 return;
798 case 0x0007: /* mul.l Rm,Rn */
799 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
800 return;
801 case 0x200f: /* muls.w Rm,Rn */
803 TCGv arg0, arg1;
804 arg0 = tcg_temp_new();
805 tcg_gen_ext16s_i32(arg0, REG(B7_4));
806 arg1 = tcg_temp_new();
807 tcg_gen_ext16s_i32(arg1, REG(B11_8));
808 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
809 tcg_temp_free(arg1);
810 tcg_temp_free(arg0);
812 return;
813 case 0x200e: /* mulu.w Rm,Rn */
815 TCGv arg0, arg1;
816 arg0 = tcg_temp_new();
817 tcg_gen_ext16u_i32(arg0, REG(B7_4));
818 arg1 = tcg_temp_new();
819 tcg_gen_ext16u_i32(arg1, REG(B11_8));
820 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
821 tcg_temp_free(arg1);
822 tcg_temp_free(arg0);
824 return;
825 case 0x600b: /* neg Rm,Rn */
826 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
827 return;
828 case 0x600a: /* negc Rm,Rn */
830 TCGv t0, t1;
831 t0 = tcg_temp_new();
832 tcg_gen_neg_i32(t0, REG(B7_4));
833 t1 = tcg_temp_new();
834 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
835 tcg_gen_sub_i32(REG(B11_8), t0, t1);
836 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
837 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
838 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
839 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
840 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
841 tcg_temp_free(t0);
842 tcg_temp_free(t1);
844 return;
845 case 0x6007: /* not Rm,Rn */
846 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
847 return;
848 case 0x200b: /* or Rm,Rn */
849 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
850 return;
851 case 0x400c: /* shad Rm,Rn */
853 int label1 = gen_new_label();
854 int label2 = gen_new_label();
855 int label3 = gen_new_label();
856 int label4 = gen_new_label();
857 TCGv shift;
858 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
859 /* Rm positive, shift to the left */
860 shift = tcg_temp_new();
861 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
862 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
863 tcg_temp_free(shift);
864 tcg_gen_br(label4);
865 /* Rm negative, shift to the right */
866 gen_set_label(label1);
867 shift = tcg_temp_new();
868 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
869 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
870 tcg_gen_not_i32(shift, REG(B7_4));
871 tcg_gen_andi_i32(shift, shift, 0x1f);
872 tcg_gen_addi_i32(shift, shift, 1);
873 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
874 tcg_temp_free(shift);
875 tcg_gen_br(label4);
876 /* Rm = -32 */
877 gen_set_label(label2);
878 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
879 tcg_gen_movi_i32(REG(B11_8), 0);
880 tcg_gen_br(label4);
881 gen_set_label(label3);
882 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
883 gen_set_label(label4);
885 return;
886 case 0x400d: /* shld Rm,Rn */
888 int label1 = gen_new_label();
889 int label2 = gen_new_label();
890 int label3 = gen_new_label();
891 TCGv shift;
892 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
893 /* Rm positive, shift to the left */
894 shift = tcg_temp_new();
895 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
896 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
897 tcg_temp_free(shift);
898 tcg_gen_br(label3);
899 /* Rm negative, shift to the right */
900 gen_set_label(label1);
901 shift = tcg_temp_new();
902 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
903 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
904 tcg_gen_not_i32(shift, REG(B7_4));
905 tcg_gen_andi_i32(shift, shift, 0x1f);
906 tcg_gen_addi_i32(shift, shift, 1);
907 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
908 tcg_temp_free(shift);
909 tcg_gen_br(label3);
910 /* Rm = -32 */
911 gen_set_label(label2);
912 tcg_gen_movi_i32(REG(B11_8), 0);
913 gen_set_label(label3);
915 return;
916 case 0x3008: /* sub Rm,Rn */
917 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
918 return;
919 case 0x300a: /* subc Rm,Rn */
921 TCGv t0, t1, t2;
922 t0 = tcg_temp_new();
923 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
924 t1 = tcg_temp_new();
925 tcg_gen_sub_i32(t1, REG(B11_8), REG(B7_4));
926 tcg_gen_sub_i32(t0, t1, t0);
927 t2 = tcg_temp_new();
928 tcg_gen_setcond_i32(TCG_COND_LTU, t2, REG(B11_8), t1);
929 tcg_gen_setcond_i32(TCG_COND_LTU, t1, t1, t0);
930 tcg_gen_or_i32(t1, t1, t2);
931 tcg_temp_free(t2);
932 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
933 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
934 tcg_temp_free(t1);
935 tcg_gen_mov_i32(REG(B11_8), t0);
936 tcg_temp_free(t0);
938 return;
939 case 0x300b: /* subv Rm,Rn */
941 TCGv t0, t1, t2;
942 t0 = tcg_temp_new();
943 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
944 t1 = tcg_temp_new();
945 tcg_gen_xor_i32(t1, t0, REG(B7_4));
946 t2 = tcg_temp_new();
947 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
948 tcg_gen_and_i32(t1, t1, t2);
949 tcg_temp_free(t2);
950 tcg_gen_shri_i32(t1, t1, 31);
951 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
952 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
953 tcg_temp_free(t1);
954 tcg_gen_mov_i32(REG(B11_8), t0);
955 tcg_temp_free(t0);
957 return;
958 case 0x2008: /* tst Rm,Rn */
960 TCGv val = tcg_temp_new();
961 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
962 gen_cmp_imm(TCG_COND_EQ, val, 0);
963 tcg_temp_free(val);
965 return;
966 case 0x200a: /* xor Rm,Rn */
967 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
968 return;
969 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
970 CHECK_FPU_ENABLED
971 if (ctx->flags & FPSCR_SZ) {
972 TCGv_i64 fp = tcg_temp_new_i64();
973 gen_load_fpr64(fp, XREG(B7_4));
974 gen_store_fpr64(fp, XREG(B11_8));
975 tcg_temp_free_i64(fp);
976 } else {
977 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
979 return;
980 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
981 CHECK_FPU_ENABLED
982 if (ctx->flags & FPSCR_SZ) {
983 TCGv addr_hi = tcg_temp_new();
984 int fr = XREG(B7_4);
985 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
986 tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
987 ctx->memidx, MO_TEUL);
988 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
989 ctx->memidx, MO_TEUL);
990 tcg_temp_free(addr_hi);
991 } else {
992 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
993 ctx->memidx, MO_TEUL);
995 return;
996 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
997 CHECK_FPU_ENABLED
998 if (ctx->flags & FPSCR_SZ) {
999 TCGv addr_hi = tcg_temp_new();
1000 int fr = XREG(B11_8);
1001 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1002 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
1003 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
1004 tcg_temp_free(addr_hi);
1005 } else {
1006 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
1007 ctx->memidx, MO_TEUL);
1009 return;
1010 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1011 CHECK_FPU_ENABLED
1012 if (ctx->flags & FPSCR_SZ) {
1013 TCGv addr_hi = tcg_temp_new();
1014 int fr = XREG(B11_8);
1015 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1016 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
1017 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
1018 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1019 tcg_temp_free(addr_hi);
1020 } else {
1021 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
1022 ctx->memidx, MO_TEUL);
1023 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1025 return;
1026 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1027 CHECK_FPU_ENABLED
1028 if (ctx->flags & FPSCR_SZ) {
1029 TCGv addr = tcg_temp_new_i32();
1030 int fr = XREG(B7_4);
1031 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1032 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
1033 tcg_gen_subi_i32(addr, addr, 4);
1034 tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
1035 tcg_gen_mov_i32(REG(B11_8), addr);
1036 tcg_temp_free(addr);
1037 } else {
1038 TCGv addr;
1039 addr = tcg_temp_new_i32();
1040 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1041 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1042 ctx->memidx, MO_TEUL);
1043 tcg_gen_mov_i32(REG(B11_8), addr);
1044 tcg_temp_free(addr);
1046 return;
1047 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1048 CHECK_FPU_ENABLED
1050 TCGv addr = tcg_temp_new_i32();
1051 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1052 if (ctx->flags & FPSCR_SZ) {
1053 int fr = XREG(B11_8);
1054 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1055 ctx->memidx, MO_TEUL);
1056 tcg_gen_addi_i32(addr, addr, 4);
1057 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1058 ctx->memidx, MO_TEUL);
1059 } else {
1060 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
1061 ctx->memidx, MO_TEUL);
1063 tcg_temp_free(addr);
1065 return;
1066 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1067 CHECK_FPU_ENABLED
1069 TCGv addr = tcg_temp_new();
1070 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1071 if (ctx->flags & FPSCR_SZ) {
1072 int fr = XREG(B7_4);
1073 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1074 ctx->memidx, MO_TEUL);
1075 tcg_gen_addi_i32(addr, addr, 4);
1076 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1077 ctx->memidx, MO_TEUL);
1078 } else {
1079 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1080 ctx->memidx, MO_TEUL);
1082 tcg_temp_free(addr);
1084 return;
1085 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1086 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1087 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1088 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1089 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1090 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1092 CHECK_FPU_ENABLED
1093 if (ctx->flags & FPSCR_PR) {
1094 TCGv_i64 fp0, fp1;
1096 if (ctx->opcode & 0x0110)
1097 break; /* illegal instruction */
1098 fp0 = tcg_temp_new_i64();
1099 fp1 = tcg_temp_new_i64();
1100 gen_load_fpr64(fp0, DREG(B11_8));
1101 gen_load_fpr64(fp1, DREG(B7_4));
1102 switch (ctx->opcode & 0xf00f) {
1103 case 0xf000: /* fadd Rm,Rn */
1104 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1105 break;
1106 case 0xf001: /* fsub Rm,Rn */
1107 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1108 break;
1109 case 0xf002: /* fmul Rm,Rn */
1110 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1111 break;
1112 case 0xf003: /* fdiv Rm,Rn */
1113 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1114 break;
1115 case 0xf004: /* fcmp/eq Rm,Rn */
1116 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1117 return;
1118 case 0xf005: /* fcmp/gt Rm,Rn */
1119 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1120 return;
1122 gen_store_fpr64(fp0, DREG(B11_8));
1123 tcg_temp_free_i64(fp0);
1124 tcg_temp_free_i64(fp1);
1125 } else {
1126 switch (ctx->opcode & 0xf00f) {
1127 case 0xf000: /* fadd Rm,Rn */
1128 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1129 cpu_fregs[FREG(B11_8)],
1130 cpu_fregs[FREG(B7_4)]);
1131 break;
1132 case 0xf001: /* fsub Rm,Rn */
1133 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1134 cpu_fregs[FREG(B11_8)],
1135 cpu_fregs[FREG(B7_4)]);
1136 break;
1137 case 0xf002: /* fmul Rm,Rn */
1138 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1139 cpu_fregs[FREG(B11_8)],
1140 cpu_fregs[FREG(B7_4)]);
1141 break;
1142 case 0xf003: /* fdiv Rm,Rn */
1143 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1144 cpu_fregs[FREG(B11_8)],
1145 cpu_fregs[FREG(B7_4)]);
1146 break;
1147 case 0xf004: /* fcmp/eq Rm,Rn */
1148 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1149 cpu_fregs[FREG(B7_4)]);
1150 return;
1151 case 0xf005: /* fcmp/gt Rm,Rn */
1152 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1153 cpu_fregs[FREG(B7_4)]);
1154 return;
1158 return;
1159 case 0xf00e: /* fmac FR0,RM,Rn */
1161 CHECK_FPU_ENABLED
1162 if (ctx->flags & FPSCR_PR) {
1163 break; /* illegal instruction */
1164 } else {
1165 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1166 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1167 cpu_fregs[FREG(B11_8)]);
1168 return;
1173 switch (ctx->opcode & 0xff00) {
1174 case 0xc900: /* and #imm,R0 */
1175 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1176 return;
1177 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1179 TCGv addr, val;
1180 addr = tcg_temp_new();
1181 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1182 val = tcg_temp_new();
1183 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1184 tcg_gen_andi_i32(val, val, B7_0);
1185 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1186 tcg_temp_free(val);
1187 tcg_temp_free(addr);
1189 return;
1190 case 0x8b00: /* bf label */
1191 CHECK_NOT_DELAY_SLOT
1192 gen_conditional_jump(ctx, ctx->pc + 2,
1193 ctx->pc + 4 + B7_0s * 2);
1194 ctx->bstate = BS_BRANCH;
1195 return;
1196 case 0x8f00: /* bf/s label */
1197 CHECK_NOT_DELAY_SLOT
1198 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1199 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1200 return;
1201 case 0x8900: /* bt label */
1202 CHECK_NOT_DELAY_SLOT
1203 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1204 ctx->pc + 2);
1205 ctx->bstate = BS_BRANCH;
1206 return;
1207 case 0x8d00: /* bt/s label */
1208 CHECK_NOT_DELAY_SLOT
1209 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1210 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1211 return;
1212 case 0x8800: /* cmp/eq #imm,R0 */
1213 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1214 return;
1215 case 0xc400: /* mov.b @(disp,GBR),R0 */
1217 TCGv addr = tcg_temp_new();
1218 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1219 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1220 tcg_temp_free(addr);
1222 return;
1223 case 0xc500: /* mov.w @(disp,GBR),R0 */
1225 TCGv addr = tcg_temp_new();
1226 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1227 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1228 tcg_temp_free(addr);
1230 return;
1231 case 0xc600: /* mov.l @(disp,GBR),R0 */
1233 TCGv addr = tcg_temp_new();
1234 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1235 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1236 tcg_temp_free(addr);
1238 return;
1239 case 0xc000: /* mov.b R0,@(disp,GBR) */
1241 TCGv addr = tcg_temp_new();
1242 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1243 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1244 tcg_temp_free(addr);
1246 return;
1247 case 0xc100: /* mov.w R0,@(disp,GBR) */
1249 TCGv addr = tcg_temp_new();
1250 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1251 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1252 tcg_temp_free(addr);
1254 return;
1255 case 0xc200: /* mov.l R0,@(disp,GBR) */
1257 TCGv addr = tcg_temp_new();
1258 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1259 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1260 tcg_temp_free(addr);
1262 return;
1263 case 0x8000: /* mov.b R0,@(disp,Rn) */
1265 TCGv addr = tcg_temp_new();
1266 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1267 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1268 tcg_temp_free(addr);
1270 return;
1271 case 0x8100: /* mov.w R0,@(disp,Rn) */
1273 TCGv addr = tcg_temp_new();
1274 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1275 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1276 tcg_temp_free(addr);
1278 return;
1279 case 0x8400: /* mov.b @(disp,Rn),R0 */
1281 TCGv addr = tcg_temp_new();
1282 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1283 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1284 tcg_temp_free(addr);
1286 return;
1287 case 0x8500: /* mov.w @(disp,Rn),R0 */
1289 TCGv addr = tcg_temp_new();
1290 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1291 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1292 tcg_temp_free(addr);
1294 return;
1295 case 0xc700: /* mova @(disp,PC),R0 */
1296 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1297 return;
1298 case 0xcb00: /* or #imm,R0 */
1299 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1300 return;
1301 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1303 TCGv addr, val;
1304 addr = tcg_temp_new();
1305 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1306 val = tcg_temp_new();
1307 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1308 tcg_gen_ori_i32(val, val, B7_0);
1309 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1310 tcg_temp_free(val);
1311 tcg_temp_free(addr);
1313 return;
1314 case 0xc300: /* trapa #imm */
1316 TCGv imm;
1317 CHECK_NOT_DELAY_SLOT
1318 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1319 imm = tcg_const_i32(B7_0);
1320 gen_helper_trapa(cpu_env, imm);
1321 tcg_temp_free(imm);
1322 ctx->bstate = BS_BRANCH;
1324 return;
1325 case 0xc800: /* tst #imm,R0 */
1327 TCGv val = tcg_temp_new();
1328 tcg_gen_andi_i32(val, REG(0), B7_0);
1329 gen_cmp_imm(TCG_COND_EQ, val, 0);
1330 tcg_temp_free(val);
1332 return;
1333 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1335 TCGv val = tcg_temp_new();
1336 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1337 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1338 tcg_gen_andi_i32(val, val, B7_0);
1339 gen_cmp_imm(TCG_COND_EQ, val, 0);
1340 tcg_temp_free(val);
1342 return;
1343 case 0xca00: /* xor #imm,R0 */
1344 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1345 return;
1346 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1348 TCGv addr, val;
1349 addr = tcg_temp_new();
1350 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1351 val = tcg_temp_new();
1352 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1353 tcg_gen_xori_i32(val, val, B7_0);
1354 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1355 tcg_temp_free(val);
1356 tcg_temp_free(addr);
1358 return;
1361 switch (ctx->opcode & 0xf08f) {
1362 case 0x408e: /* ldc Rm,Rn_BANK */
1363 CHECK_PRIVILEGED
1364 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1365 return;
1366 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1367 CHECK_PRIVILEGED
1368 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1369 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1370 return;
1371 case 0x0082: /* stc Rm_BANK,Rn */
1372 CHECK_PRIVILEGED
1373 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1374 return;
1375 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1376 CHECK_PRIVILEGED
1378 TCGv addr = tcg_temp_new();
1379 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1380 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1381 tcg_gen_mov_i32(REG(B11_8), addr);
1382 tcg_temp_free(addr);
1384 return;
1387 switch (ctx->opcode & 0xf0ff) {
1388 case 0x0023: /* braf Rn */
1389 CHECK_NOT_DELAY_SLOT
1390 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1391 ctx->flags |= DELAY_SLOT;
1392 ctx->delayed_pc = (uint32_t) - 1;
1393 return;
1394 case 0x0003: /* bsrf Rn */
1395 CHECK_NOT_DELAY_SLOT
1396 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1397 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1398 ctx->flags |= DELAY_SLOT;
1399 ctx->delayed_pc = (uint32_t) - 1;
1400 return;
1401 case 0x4015: /* cmp/pl Rn */
1402 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1403 return;
1404 case 0x4011: /* cmp/pz Rn */
1405 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1406 return;
1407 case 0x4010: /* dt Rn */
1408 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1409 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1410 return;
1411 case 0x402b: /* jmp @Rn */
1412 CHECK_NOT_DELAY_SLOT
1413 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1414 ctx->flags |= DELAY_SLOT;
1415 ctx->delayed_pc = (uint32_t) - 1;
1416 return;
1417 case 0x400b: /* jsr @Rn */
1418 CHECK_NOT_DELAY_SLOT
1419 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1420 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1421 ctx->flags |= DELAY_SLOT;
1422 ctx->delayed_pc = (uint32_t) - 1;
1423 return;
1424 case 0x400e: /* ldc Rm,SR */
1425 CHECK_PRIVILEGED
1426 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1427 ctx->bstate = BS_STOP;
1428 return;
1429 case 0x4007: /* ldc.l @Rm+,SR */
1430 CHECK_PRIVILEGED
1432 TCGv val = tcg_temp_new();
1433 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1434 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1435 tcg_temp_free(val);
1436 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1437 ctx->bstate = BS_STOP;
1439 return;
1440 case 0x0002: /* stc SR,Rn */
1441 CHECK_PRIVILEGED
1442 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1443 return;
1444 case 0x4003: /* stc SR,@-Rn */
1445 CHECK_PRIVILEGED
1447 TCGv addr = tcg_temp_new();
1448 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1449 tcg_gen_qemu_st_i32(cpu_sr, addr, ctx->memidx, MO_TEUL);
1450 tcg_gen_mov_i32(REG(B11_8), addr);
1451 tcg_temp_free(addr);
1453 return;
1454 #define LD(reg,ldnum,ldpnum,prechk) \
1455 case ldnum: \
1456 prechk \
1457 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1458 return; \
1459 case ldpnum: \
1460 prechk \
1461 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1462 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1463 return;
1464 #define ST(reg,stnum,stpnum,prechk) \
1465 case stnum: \
1466 prechk \
1467 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1468 return; \
1469 case stpnum: \
1470 prechk \
1472 TCGv addr = tcg_temp_new(); \
1473 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1474 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1475 tcg_gen_mov_i32(REG(B11_8), addr); \
1476 tcg_temp_free(addr); \
1478 return;
1479 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1480 LD(reg,ldnum,ldpnum,prechk) \
1481 ST(reg,stnum,stpnum,prechk)
1482 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1483 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1484 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1485 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1486 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1487 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1488 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1489 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1490 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1491 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1492 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1493 case 0x406a: /* lds Rm,FPSCR */
1494 CHECK_FPU_ENABLED
1495 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1496 ctx->bstate = BS_STOP;
1497 return;
1498 case 0x4066: /* lds.l @Rm+,FPSCR */
1499 CHECK_FPU_ENABLED
1501 TCGv addr = tcg_temp_new();
1502 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1503 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1504 gen_helper_ld_fpscr(cpu_env, addr);
1505 tcg_temp_free(addr);
1506 ctx->bstate = BS_STOP;
1508 return;
1509 case 0x006a: /* sts FPSCR,Rn */
1510 CHECK_FPU_ENABLED
1511 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1512 return;
1513 case 0x4062: /* sts FPSCR,@-Rn */
1514 CHECK_FPU_ENABLED
1516 TCGv addr, val;
1517 val = tcg_temp_new();
1518 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1519 addr = tcg_temp_new();
1520 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1521 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1522 tcg_gen_mov_i32(REG(B11_8), addr);
1523 tcg_temp_free(addr);
1524 tcg_temp_free(val);
1526 return;
1527 case 0x00c3: /* movca.l R0,@Rm */
1529 TCGv val = tcg_temp_new();
1530 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1531 gen_helper_movcal(cpu_env, REG(B11_8), val);
1532 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1534 ctx->has_movcal = 1;
1535 return;
1536 case 0x40a9:
1537 /* MOVUA.L @Rm,R0 (Rm) -> R0
1538 Load non-boundary-aligned data */
1539 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1540 return;
1541 case 0x40e9:
1542 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1543 Load non-boundary-aligned data */
1544 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1545 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1546 return;
1547 case 0x0029: /* movt Rn */
1548 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1549 return;
1550 case 0x0073:
1551 /* MOVCO.L
1552 LDST -> T
1553 If (T == 1) R0 -> (Rn)
1554 0 -> LDST
1556 if (ctx->features & SH_FEATURE_SH4A) {
1557 int label = gen_new_label();
1558 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1559 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1560 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1561 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1562 gen_set_label(label);
1563 tcg_gen_movi_i32(cpu_ldst, 0);
1564 return;
1565 } else
1566 break;
1567 case 0x0063:
1568 /* MOVLI.L @Rm,R0
1569 1 -> LDST
1570 (Rm) -> R0
1571 When interrupt/exception
1572 occurred 0 -> LDST
1574 if (ctx->features & SH_FEATURE_SH4A) {
1575 tcg_gen_movi_i32(cpu_ldst, 0);
1576 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1577 tcg_gen_movi_i32(cpu_ldst, 1);
1578 return;
1579 } else
1580 break;
1581 case 0x0093: /* ocbi @Rn */
1583 gen_helper_ocbi(cpu_env, REG(B11_8));
1585 return;
1586 case 0x00a3: /* ocbp @Rn */
1587 case 0x00b3: /* ocbwb @Rn */
1588 /* These instructions are supposed to do nothing in case of
1589 a cache miss. Given that we only partially emulate caches
1590 it is safe to simply ignore them. */
1591 return;
1592 case 0x0083: /* pref @Rn */
1593 return;
1594 case 0x00d3: /* prefi @Rn */
1595 if (ctx->features & SH_FEATURE_SH4A)
1596 return;
1597 else
1598 break;
1599 case 0x00e3: /* icbi @Rn */
1600 if (ctx->features & SH_FEATURE_SH4A)
1601 return;
1602 else
1603 break;
1604 case 0x00ab: /* synco */
1605 if (ctx->features & SH_FEATURE_SH4A)
1606 return;
1607 else
1608 break;
1609 case 0x4024: /* rotcl Rn */
1611 TCGv tmp = tcg_temp_new();
1612 tcg_gen_mov_i32(tmp, cpu_sr);
1613 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1614 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1615 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1616 tcg_temp_free(tmp);
1618 return;
1619 case 0x4025: /* rotcr Rn */
1621 TCGv tmp = tcg_temp_new();
1622 tcg_gen_mov_i32(tmp, cpu_sr);
1623 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1624 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1625 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1626 tcg_temp_free(tmp);
1628 return;
1629 case 0x4004: /* rotl Rn */
1630 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1631 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1632 return;
1633 case 0x4005: /* rotr Rn */
1634 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1635 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1636 return;
1637 case 0x4000: /* shll Rn */
1638 case 0x4020: /* shal Rn */
1639 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1640 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1641 return;
1642 case 0x4021: /* shar Rn */
1643 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1644 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1645 return;
1646 case 0x4001: /* shlr Rn */
1647 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1648 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1649 return;
1650 case 0x4008: /* shll2 Rn */
1651 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1652 return;
1653 case 0x4018: /* shll8 Rn */
1654 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1655 return;
1656 case 0x4028: /* shll16 Rn */
1657 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1658 return;
1659 case 0x4009: /* shlr2 Rn */
1660 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1661 return;
1662 case 0x4019: /* shlr8 Rn */
1663 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1664 return;
1665 case 0x4029: /* shlr16 Rn */
1666 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1667 return;
1668 case 0x401b: /* tas.b @Rn */
1670 TCGv addr, val;
1671 addr = tcg_temp_local_new();
1672 tcg_gen_mov_i32(addr, REG(B11_8));
1673 val = tcg_temp_local_new();
1674 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1675 gen_cmp_imm(TCG_COND_EQ, val, 0);
1676 tcg_gen_ori_i32(val, val, 0x80);
1677 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1678 tcg_temp_free(val);
1679 tcg_temp_free(addr);
1681 return;
1682 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1683 CHECK_FPU_ENABLED
1684 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1685 return;
1686 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1687 CHECK_FPU_ENABLED
1688 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1689 return;
1690 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1691 CHECK_FPU_ENABLED
1692 if (ctx->flags & FPSCR_PR) {
1693 TCGv_i64 fp;
1694 if (ctx->opcode & 0x0100)
1695 break; /* illegal instruction */
1696 fp = tcg_temp_new_i64();
1697 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1698 gen_store_fpr64(fp, DREG(B11_8));
1699 tcg_temp_free_i64(fp);
1701 else {
1702 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1704 return;
1705 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1706 CHECK_FPU_ENABLED
1707 if (ctx->flags & FPSCR_PR) {
1708 TCGv_i64 fp;
1709 if (ctx->opcode & 0x0100)
1710 break; /* illegal instruction */
1711 fp = tcg_temp_new_i64();
1712 gen_load_fpr64(fp, DREG(B11_8));
1713 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1714 tcg_temp_free_i64(fp);
1716 else {
1717 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1719 return;
1720 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1721 CHECK_FPU_ENABLED
1723 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1725 return;
1726 case 0xf05d: /* fabs FRn/DRn */
1727 CHECK_FPU_ENABLED
1728 if (ctx->flags & FPSCR_PR) {
1729 if (ctx->opcode & 0x0100)
1730 break; /* illegal instruction */
1731 TCGv_i64 fp = tcg_temp_new_i64();
1732 gen_load_fpr64(fp, DREG(B11_8));
1733 gen_helper_fabs_DT(fp, fp);
1734 gen_store_fpr64(fp, DREG(B11_8));
1735 tcg_temp_free_i64(fp);
1736 } else {
1737 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1739 return;
1740 case 0xf06d: /* fsqrt FRn */
1741 CHECK_FPU_ENABLED
1742 if (ctx->flags & FPSCR_PR) {
1743 if (ctx->opcode & 0x0100)
1744 break; /* illegal instruction */
1745 TCGv_i64 fp = tcg_temp_new_i64();
1746 gen_load_fpr64(fp, DREG(B11_8));
1747 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1748 gen_store_fpr64(fp, DREG(B11_8));
1749 tcg_temp_free_i64(fp);
1750 } else {
1751 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1752 cpu_fregs[FREG(B11_8)]);
1754 return;
1755 case 0xf07d: /* fsrra FRn */
1756 CHECK_FPU_ENABLED
1757 break;
1758 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1759 CHECK_FPU_ENABLED
1760 if (!(ctx->flags & FPSCR_PR)) {
1761 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1763 return;
1764 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1765 CHECK_FPU_ENABLED
1766 if (!(ctx->flags & FPSCR_PR)) {
1767 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1769 return;
1770 case 0xf0ad: /* fcnvsd FPUL,DRn */
1771 CHECK_FPU_ENABLED
1773 TCGv_i64 fp = tcg_temp_new_i64();
1774 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1775 gen_store_fpr64(fp, DREG(B11_8));
1776 tcg_temp_free_i64(fp);
1778 return;
1779 case 0xf0bd: /* fcnvds DRn,FPUL */
1780 CHECK_FPU_ENABLED
1782 TCGv_i64 fp = tcg_temp_new_i64();
1783 gen_load_fpr64(fp, DREG(B11_8));
1784 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1785 tcg_temp_free_i64(fp);
1787 return;
1788 case 0xf0ed: /* fipr FVm,FVn */
1789 CHECK_FPU_ENABLED
1790 if ((ctx->flags & FPSCR_PR) == 0) {
1791 TCGv m, n;
1792 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1793 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1794 gen_helper_fipr(cpu_env, m, n);
1795 tcg_temp_free(m);
1796 tcg_temp_free(n);
1797 return;
1799 break;
1800 case 0xf0fd: /* ftrv XMTRX,FVn */
1801 CHECK_FPU_ENABLED
1802 if ((ctx->opcode & 0x0300) == 0x0100 &&
1803 (ctx->flags & FPSCR_PR) == 0) {
1804 TCGv n;
1805 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1806 gen_helper_ftrv(cpu_env, n);
1807 tcg_temp_free(n);
1808 return;
1810 break;
1812 #if 0
1813 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1814 ctx->opcode, ctx->pc);
1815 fflush(stderr);
1816 #endif
1817 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1818 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1819 gen_helper_raise_slot_illegal_instruction(cpu_env);
1820 } else {
1821 gen_helper_raise_illegal_instruction(cpu_env);
1823 ctx->bstate = BS_BRANCH;
/* Translate one guest instruction, then emit any pending delay-slot
 * branch.  old_flags snapshots the flags from *before* this insn: if it
 * carries DELAY_SLOT / DELAY_SLOT_CONDITIONAL, the insn just decoded was
 * executing in the delay slot of a branch decoded on the previous
 * iteration, and that branch must be emitted now. */
static void decode_opc(DisasContext * ctx)
    uint32_t old_flags = ctx->flags;

    /* Emit a per-insn debug marker when TCG op logging is enabled. */
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(ctx->pc);

    _decode_opc(ctx);

    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            gen_store_flags(0);
        } else {
            /* go out of the delay slot */
            uint32_t new_flags = ctx->flags;
            new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
            gen_store_flags(new_flags);
        /* A delay slot always ends the TB: emit the owning branch. */
        ctx->flags = 0;
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);

    /* go into a delay slot: persist the flags so an exception taken in
       the slot sees the delay-slot state. */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        gen_store_flags(ctx->flags);
/* Translate a block of guest code starting at tb->pc into TCG ops.
 *
 * When search_pc is true, per-op metadata (guest PC, translator flags,
 * icount) is recorded in tcg_ctx.gen_opc_* so a host fault address can
 * later be mapped back to a guest PC; when false, tb->size/tb->icount
 * are filled in instead.  Translation stops at a branch/exception
 * (ctx.bstate != BS_NONE), a breakpoint, a page boundary, the insn
 * budget, or single-stepping. */
static inline void
gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
    CPUState *cs = CPU(cpu);
    CPUSH4State *env = &cpu->env;
    DisasContext ctx;
    target_ulong pc_start;
    /* NOTE(review): 'static' looks unnecessary here — the pointer is
       reassigned on every call before use — but it is harmless. */
    static uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int i, ii;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    /* memidx 0 = privileged (SR.MD set), 1 = user mode. */
    ctx.memidx = (ctx.flags & SR_MD) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch. */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);

    ii = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_tb_start();
    /* Main translation loop: one iteration per guest instruction. */
    while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end) {
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (ctx.pc == bp->pc) {
                    /* We have hit a breakpoint - make sure PC is up-to-date */
                    tcg_gen_movi_i32(cpu_pc, ctx.pc);
                    gen_helper_debug(cpu_env);
                    ctx.bstate = BS_BRANCH;
                    break;
        if (search_pc) {
            /* Record insn-boundary metadata; zero-fill ops belonging to
               the previous insn that were emitted after its boundary. */
            i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (ii < i) {
                ii++;
                while (ii < i)
                    tcg_ctx.gen_opc_instr_start[ii++] = 0;
            tcg_ctx.gen_opc_pc[ii] = ctx.pc;
            gen_opc_hflags[ii] = ctx.flags;
            tcg_ctx.gen_opc_instr_start[ii] = 1;
            tcg_ctx.gen_opc_icount[ii] = num_insns;
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
#if 0
        fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
        fflush(stderr);
#endif
        /* SH-4 instructions are a fixed 16 bits. */
        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        num_insns++;
        ctx.pc += 2;
        /* Never cross a page boundary within one TB. */
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (cs->singlestep_enabled) {
            break;
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (cs->singlestep_enabled) {
        tcg_gen_movi_i32(cpu_pc, ctx.pc);
        gen_helper_debug(cpu_env);
    } else {
        /* Close the TB according to how translation ended. */
        switch (ctx.bstate) {
        case BS_STOP:
            /* gen_op_interrupt_restart(); */
            /* fall through */
        case BS_NONE:
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* gen_op_interrupt_restart(); */
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            /* Branch cases already emitted their own TB exit. */
            break;

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill metadata for ops emitted after the last insn. */
        i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        ii++;
        while (ii <= i)
            tcg_ctx.gen_opc_instr_start[ii++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
#endif
/* Translate one TB without recording PC-search metadata (normal path). */
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
    gen_intermediate_code_internal(sh_env_get_cpu(env), tb, false);
/* Translate one TB while recording per-op guest PC/flags/icount so a
   host PC inside the TB can be mapped back to a guest PC (used by
   restore_state_to_opc after a fault). */
void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
    gen_intermediate_code_internal(sh_env_get_cpu(env), tb, true);
/* Restore env->pc and env->flags from the metadata recorded at
   translation time (see gen_intermediate_code_pc); pc_pos indexes the
   op at which execution stopped mid-TB. */
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
    env->flags = gen_opc_hflags[pc_pos];