/*
 * vfio/pci: Convert all MemoryRegion to dynamic alloc and consistent functions
 * [qemu/kevin.git] / target-sh4 / translate.c
 * blob 7c189680a7a46e39b2be796fc531350458bdb0f1
 */
/*
 * SH4 translation
 *
 * Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define DEBUG_DISAS

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    struct TranslationBlock *tb; /* TB currently being translated */
    target_ulong pc;             /* guest PC of the current instruction */
    uint16_t opcode;             /* current 16-bit SH4 opcode */
    uint32_t flags;              /* SR/delay-slot state bits used for decode */
    int bstate;                  /* BS_* state: how translation will end */
    int memidx;                  /* MMU index for guest loads/stores */
    uint32_t delayed_pc;         /* branch target; (uint32_t)-1 if dynamic */
    int singlestep_enabled;      /* nonzero: emit debug exit after each insn */
    uint32_t features;           /* CPU feature bits */
    int has_movcal;              /* movca.l backup still live (see _decode_opc) */
} DisasContext;
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
/* User mode iff SR.MD (privileged-mode bit) is clear in the decode flags */
#define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
#endif
/* Values for DisasContext::bstate */
enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                  * exception condition */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_gregs[24];              /* R0-R15 plus banked R0-R7 */
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t; /* SR with Q/M/T split out */
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];              /* two banks of 16 FP registers */

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;

#include "exec/gen-icount.h"
/* Register every CPUSH4State field the translator touches as a named TCG
   global.  Must run before any code generation; the done_init guard makes
   repeated calls a no-op. */
void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* General registers: both banks, so REG()/ALTREG() can index directly */
    for (i = 0; i < 24; i++)
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    /* Translator-internal state, underscored to mark it as such */
    cpu_flags = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_ldst = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);

    done_init = 1;
}
/* Dump the CPU register state (PC, control registers, R0-R23, delay-slot
   status) to stream f for the monitor/debug log. */
void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;

    /* SR must be recomposed from its split Q/M/T parts */
    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    /* All 24 general registers (both banks), four per line */
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    }
}
184 static void gen_read_sr(TCGv dst)
186 TCGv t0 = tcg_temp_new();
187 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
188 tcg_gen_or_i32(dst, dst, t0);
189 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
190 tcg_gen_or_i32(dst, dst, t0);
191 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
192 tcg_gen_or_i32(dst, cpu_sr, t0);
193 tcg_temp_free_i32(t0);
/* Scatter a full 32-bit SR value from src into cpu_sr (with Q/M/T bits
   masked out) and the separate one-bit cpu_sr_q/m/t globals. */
static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_shri_i32(cpu_sr_q, src, SR_Q);
    tcg_gen_andi_i32(cpu_sr_q, cpu_sr_q, 1);
    tcg_gen_shri_i32(cpu_sr_m, src, SR_M);
    tcg_gen_andi_i32(cpu_sr_m, cpu_sr_m, 1);
    tcg_gen_shri_i32(cpu_sr_t, src, SR_T);
    tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
}
/* End the TB with a jump to dest, chaining directly to the next TB (slot n)
   when dest is in the same guest page and we are not single-stepping. */
static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = ctx->tb;

    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
        !ctx->singlestep_enabled) {
        /* Use a direct jump if in same page and singlestep not enabled */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled)
            gen_helper_debug(cpu_env);
        tcg_gen_exit_tb(0);  /* 0: no chaining, return to main loop */
    }
}
/* Emit the jump recorded in ctx->delayed_pc, handling both dynamic
   (register-valued) and statically-known targets. */
static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == (uint32_t) - 1) {
        /* Target is not statically known; it comes necessarily from a
           delayed jump, as immediate jumps are conditional jumps */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        if (ctx->singlestep_enabled)
            gen_helper_debug(cpu_env);
        tcg_gen_exit_tb(0);
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}
/* Latch delayed_pc and, when SR.T matches t, set DELAY_SLOT_TRUE in
   cpu_flags so the branch is taken after the delay slot executes. */
static inline void gen_branch_slot(uint32_t delayed_pc, int t)
{
    TCGLabel *label = gen_new_label();
    tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
    /* skip the flag update when T does not match the wanted sense */
    tcg_gen_brcondi_i32(t ? TCG_COND_EQ : TCG_COND_NE, cpu_sr_t, 0, label);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    gen_set_label(label);
}
/* Immediate conditional jump (bt or bf): go to ift when SR.T is set,
   otherwise to ifnott. */
static void gen_conditional_jump(DisasContext * ctx,
                                 target_ulong ift, target_ulong ifnott)
{
    TCGLabel *l1 = gen_new_label();
    tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, ifnott);  /* T clear */
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ift);     /* T set */
}
/* Delayed conditional jump (bt/s or bf/s): after the delay slot, take the
   branch iff DELAY_SLOT_TRUE was set by gen_branch_slot. */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1;
    TCGv ds;

    l1 = gen_new_label();
    ds = tcg_temp_new();
    tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    /* branch not taken: continue with the next instruction */
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    gen_set_label(l1);
    /* branch taken: clear the flag and jump to the latched target */
    tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
    gen_jump(ctx);
}
/* Replace all cpu_flags bits except DELAY_SLOT_TRUE with the given flags. */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
/* Read the double-precision pair (fregs[reg]:fregs[reg+1]) into t, with
   the even register supplying the high 32 bits. */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
/* Store the 64-bit value t into the register pair reg/reg+1; the even
   register receives the high half.  NB: clobbers t (shifts it right). */
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, t);           /* low 32 bits -> odd register */
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_extrl_i64_i32(tmp, t);           /* high 32 bits -> even register */
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);
    tcg_temp_free_i32(tmp);
}
/* Bit-field extractors for the 16-bit opcode in ctx->opcode */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit displacement, manually sign-extended to 32 bits */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* R0..R7 come from bank 1 when both SR.MD and SR.RB are set */
#define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
                && (ctx->flags & (1u << SR_RB))\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* The opposite bank of REG(x) */
#define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
                   || !(ctx->flags & (1u << SR_RB)))\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FPSCR.FR selects which FP register bank is active */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise a slot-illegal-instruction exception and abort decoding of the
   current instruction if we are inside a delay slot. */
#define CHECK_NOT_DELAY_SLOT                                    \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))       \
  {                                                             \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      gen_helper_raise_slot_illegal_instruction(cpu_env);       \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }
/* Raise an illegal-instruction exception (slot variant inside a delay
   slot) and abort decoding if executing in user mode. */
#define CHECK_PRIVILEGED                                          \
  if (IS_USER(ctx)) {                                             \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                          \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {   \
          gen_helper_raise_slot_illegal_instruction(cpu_env);     \
      } else {                                                    \
          gen_helper_raise_illegal_instruction(cpu_env);          \
      }                                                           \
      ctx->bstate = BS_BRANCH;                                    \
      return;                                                     \
  }
/* Raise an FPU-disable exception (slot variant inside a delay slot) and
   abort decoding if SR.FD (FPU disable) is set. */
#define CHECK_FPU_ENABLED                                         \
  if (ctx->flags & (1u << SR_FD)) {                               \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                          \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {   \
          gen_helper_raise_slot_fpu_disable(cpu_env);             \
      } else {                                                    \
          gen_helper_raise_fpu_disable(cpu_env);                  \
      }                                                           \
      ctx->bstate = BS_BRANCH;                                    \
      return;                                                     \
  }
355 static void _decode_opc(DisasContext * ctx)
357 /* This code tries to make movcal emulation sufficiently
358 accurate for Linux purposes. This instruction writes
359 memory, and prior to that, always allocates a cache line.
360 It is used in two contexts:
361 - in memcpy, where data is copied in blocks, the first write
362 of to a block uses movca.l for performance.
363 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
364 to flush the cache. Here, the data written by movcal.l is never
365 written to memory, and the data written is just bogus.
367 To simulate this, we simulate movcal.l, we store the value to memory,
368 but we also remember the previous content. If we see ocbi, we check
369 if movcal.l for that address was done previously. If so, the write should
370 not have hit the memory, so we restore the previous content.
371 When we see an instruction that is neither movca.l
372 nor ocbi, the previous content is discarded.
374 To optimize, we only try to flush stores when we're at the start of
375 TB, or if we already saw movca.l in this TB and did not flush stores
376 yet. */
377 if (ctx->has_movcal)
379 int opcode = ctx->opcode & 0xf0ff;
380 if (opcode != 0x0093 /* ocbi */
381 && opcode != 0x00c3 /* movca.l */)
383 gen_helper_discard_movcal_backup(cpu_env);
384 ctx->has_movcal = 0;
388 #if 0
389 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
390 #endif
392 switch (ctx->opcode) {
393 case 0x0019: /* div0u */
394 tcg_gen_movi_i32(cpu_sr_m, 0);
395 tcg_gen_movi_i32(cpu_sr_q, 0);
396 tcg_gen_movi_i32(cpu_sr_t, 0);
397 return;
398 case 0x000b: /* rts */
399 CHECK_NOT_DELAY_SLOT
400 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
401 ctx->flags |= DELAY_SLOT;
402 ctx->delayed_pc = (uint32_t) - 1;
403 return;
404 case 0x0028: /* clrmac */
405 tcg_gen_movi_i32(cpu_mach, 0);
406 tcg_gen_movi_i32(cpu_macl, 0);
407 return;
408 case 0x0048: /* clrs */
409 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
410 return;
411 case 0x0008: /* clrt */
412 tcg_gen_movi_i32(cpu_sr_t, 0);
413 return;
414 case 0x0038: /* ldtlb */
415 CHECK_PRIVILEGED
416 gen_helper_ldtlb(cpu_env);
417 return;
418 case 0x002b: /* rte */
419 CHECK_PRIVILEGED
420 CHECK_NOT_DELAY_SLOT
421 gen_write_sr(cpu_ssr);
422 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
423 ctx->flags |= DELAY_SLOT;
424 ctx->delayed_pc = (uint32_t) - 1;
425 return;
426 case 0x0058: /* sets */
427 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
428 return;
429 case 0x0018: /* sett */
430 tcg_gen_movi_i32(cpu_sr_t, 1);
431 return;
432 case 0xfbfd: /* frchg */
433 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
434 ctx->bstate = BS_STOP;
435 return;
436 case 0xf3fd: /* fschg */
437 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
438 ctx->bstate = BS_STOP;
439 return;
440 case 0x0009: /* nop */
441 return;
442 case 0x001b: /* sleep */
443 CHECK_PRIVILEGED
444 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
445 gen_helper_sleep(cpu_env);
446 return;
449 switch (ctx->opcode & 0xf000) {
450 case 0x1000: /* mov.l Rm,@(disp,Rn) */
452 TCGv addr = tcg_temp_new();
453 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
454 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
455 tcg_temp_free(addr);
457 return;
458 case 0x5000: /* mov.l @(disp,Rm),Rn */
460 TCGv addr = tcg_temp_new();
461 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
462 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
463 tcg_temp_free(addr);
465 return;
466 case 0xe000: /* mov #imm,Rn */
467 tcg_gen_movi_i32(REG(B11_8), B7_0s);
468 return;
469 case 0x9000: /* mov.w @(disp,PC),Rn */
471 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
472 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
473 tcg_temp_free(addr);
475 return;
476 case 0xd000: /* mov.l @(disp,PC),Rn */
478 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
479 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
480 tcg_temp_free(addr);
482 return;
483 case 0x7000: /* add #imm,Rn */
484 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
485 return;
486 case 0xa000: /* bra disp */
487 CHECK_NOT_DELAY_SLOT
488 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
489 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
490 ctx->flags |= DELAY_SLOT;
491 return;
492 case 0xb000: /* bsr disp */
493 CHECK_NOT_DELAY_SLOT
494 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
495 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
496 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
497 ctx->flags |= DELAY_SLOT;
498 return;
501 switch (ctx->opcode & 0xf00f) {
502 case 0x6003: /* mov Rm,Rn */
503 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
504 return;
505 case 0x2000: /* mov.b Rm,@Rn */
506 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
507 return;
508 case 0x2001: /* mov.w Rm,@Rn */
509 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
510 return;
511 case 0x2002: /* mov.l Rm,@Rn */
512 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
513 return;
514 case 0x6000: /* mov.b @Rm,Rn */
515 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
516 return;
517 case 0x6001: /* mov.w @Rm,Rn */
518 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
519 return;
520 case 0x6002: /* mov.l @Rm,Rn */
521 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
522 return;
523 case 0x2004: /* mov.b Rm,@-Rn */
525 TCGv addr = tcg_temp_new();
526 tcg_gen_subi_i32(addr, REG(B11_8), 1);
527 /* might cause re-execution */
528 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
529 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
530 tcg_temp_free(addr);
532 return;
533 case 0x2005: /* mov.w Rm,@-Rn */
535 TCGv addr = tcg_temp_new();
536 tcg_gen_subi_i32(addr, REG(B11_8), 2);
537 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
538 tcg_gen_mov_i32(REG(B11_8), addr);
539 tcg_temp_free(addr);
541 return;
542 case 0x2006: /* mov.l Rm,@-Rn */
544 TCGv addr = tcg_temp_new();
545 tcg_gen_subi_i32(addr, REG(B11_8), 4);
546 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
547 tcg_gen_mov_i32(REG(B11_8), addr);
549 return;
550 case 0x6004: /* mov.b @Rm+,Rn */
551 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
552 if ( B11_8 != B7_4 )
553 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
554 return;
555 case 0x6005: /* mov.w @Rm+,Rn */
556 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
557 if ( B11_8 != B7_4 )
558 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
559 return;
560 case 0x6006: /* mov.l @Rm+,Rn */
561 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
562 if ( B11_8 != B7_4 )
563 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
564 return;
565 case 0x0004: /* mov.b Rm,@(R0,Rn) */
567 TCGv addr = tcg_temp_new();
568 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
569 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
570 tcg_temp_free(addr);
572 return;
573 case 0x0005: /* mov.w Rm,@(R0,Rn) */
575 TCGv addr = tcg_temp_new();
576 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
577 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
578 tcg_temp_free(addr);
580 return;
581 case 0x0006: /* mov.l Rm,@(R0,Rn) */
583 TCGv addr = tcg_temp_new();
584 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
585 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
586 tcg_temp_free(addr);
588 return;
589 case 0x000c: /* mov.b @(R0,Rm),Rn */
591 TCGv addr = tcg_temp_new();
592 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
593 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
594 tcg_temp_free(addr);
596 return;
597 case 0x000d: /* mov.w @(R0,Rm),Rn */
599 TCGv addr = tcg_temp_new();
600 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
601 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
602 tcg_temp_free(addr);
604 return;
605 case 0x000e: /* mov.l @(R0,Rm),Rn */
607 TCGv addr = tcg_temp_new();
608 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
609 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
610 tcg_temp_free(addr);
612 return;
613 case 0x6008: /* swap.b Rm,Rn */
615 TCGv low = tcg_temp_new();;
616 tcg_gen_ext16u_i32(low, REG(B7_4));
617 tcg_gen_bswap16_i32(low, low);
618 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
619 tcg_temp_free(low);
621 return;
622 case 0x6009: /* swap.w Rm,Rn */
623 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
624 return;
625 case 0x200d: /* xtrct Rm,Rn */
627 TCGv high, low;
628 high = tcg_temp_new();
629 tcg_gen_shli_i32(high, REG(B7_4), 16);
630 low = tcg_temp_new();
631 tcg_gen_shri_i32(low, REG(B11_8), 16);
632 tcg_gen_or_i32(REG(B11_8), high, low);
633 tcg_temp_free(low);
634 tcg_temp_free(high);
636 return;
637 case 0x300c: /* add Rm,Rn */
638 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
639 return;
640 case 0x300e: /* addc Rm,Rn */
642 TCGv t0, t1;
643 t0 = tcg_const_tl(0);
644 t1 = tcg_temp_new();
645 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
646 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
647 REG(B11_8), t0, t1, cpu_sr_t);
648 tcg_temp_free(t0);
649 tcg_temp_free(t1);
651 return;
652 case 0x300f: /* addv Rm,Rn */
654 TCGv t0, t1, t2;
655 t0 = tcg_temp_new();
656 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
657 t1 = tcg_temp_new();
658 tcg_gen_xor_i32(t1, t0, REG(B11_8));
659 t2 = tcg_temp_new();
660 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
661 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
662 tcg_temp_free(t2);
663 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
664 tcg_temp_free(t1);
665 tcg_gen_mov_i32(REG(B7_4), t0);
666 tcg_temp_free(t0);
668 return;
669 case 0x2009: /* and Rm,Rn */
670 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
671 return;
672 case 0x3000: /* cmp/eq Rm,Rn */
673 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
674 return;
675 case 0x3003: /* cmp/ge Rm,Rn */
676 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
677 return;
678 case 0x3007: /* cmp/gt Rm,Rn */
679 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
680 return;
681 case 0x3006: /* cmp/hi Rm,Rn */
682 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
683 return;
684 case 0x3002: /* cmp/hs Rm,Rn */
685 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
686 return;
687 case 0x200c: /* cmp/str Rm,Rn */
689 TCGv cmp1 = tcg_temp_new();
690 TCGv cmp2 = tcg_temp_new();
691 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
692 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
693 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
694 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
695 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
696 tcg_temp_free(cmp2);
697 tcg_temp_free(cmp1);
699 return;
700 case 0x2007: /* div0s Rm,Rn */
701 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
702 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
703 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
704 return;
705 case 0x3004: /* div1 Rm,Rn */
707 TCGv t0 = tcg_temp_new();
708 TCGv t1 = tcg_temp_new();
709 TCGv t2 = tcg_temp_new();
710 TCGv zero = tcg_const_i32(0);
712 /* shift left arg1, saving the bit being pushed out and inserting
713 T on the right */
714 tcg_gen_shri_i32(t0, REG(B11_8), 31);
715 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
716 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
718 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
719 using 64-bit temps, we compute arg0's high part from q ^ m, so
720 that it is 0x00000000 when adding the value or 0xffffffff when
721 subtracting it. */
722 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
723 tcg_gen_subi_i32(t1, t1, 1);
724 tcg_gen_neg_i32(t2, REG(B7_4));
725 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
726 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
728 /* compute T and Q depending on carry */
729 tcg_gen_andi_i32(t1, t1, 1);
730 tcg_gen_xor_i32(t1, t1, t0);
731 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
732 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
734 tcg_temp_free(zero);
735 tcg_temp_free(t2);
736 tcg_temp_free(t1);
737 tcg_temp_free(t0);
739 return;
740 case 0x300d: /* dmuls.l Rm,Rn */
741 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
742 return;
743 case 0x3005: /* dmulu.l Rm,Rn */
744 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
745 return;
746 case 0x600e: /* exts.b Rm,Rn */
747 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
748 return;
749 case 0x600f: /* exts.w Rm,Rn */
750 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
751 return;
752 case 0x600c: /* extu.b Rm,Rn */
753 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
754 return;
755 case 0x600d: /* extu.w Rm,Rn */
756 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
757 return;
758 case 0x000f: /* mac.l @Rm+,@Rn+ */
760 TCGv arg0, arg1;
761 arg0 = tcg_temp_new();
762 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
763 arg1 = tcg_temp_new();
764 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
765 gen_helper_macl(cpu_env, arg0, arg1);
766 tcg_temp_free(arg1);
767 tcg_temp_free(arg0);
768 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
769 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
771 return;
772 case 0x400f: /* mac.w @Rm+,@Rn+ */
774 TCGv arg0, arg1;
775 arg0 = tcg_temp_new();
776 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
777 arg1 = tcg_temp_new();
778 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
779 gen_helper_macw(cpu_env, arg0, arg1);
780 tcg_temp_free(arg1);
781 tcg_temp_free(arg0);
782 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
783 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
785 return;
786 case 0x0007: /* mul.l Rm,Rn */
787 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
788 return;
789 case 0x200f: /* muls.w Rm,Rn */
791 TCGv arg0, arg1;
792 arg0 = tcg_temp_new();
793 tcg_gen_ext16s_i32(arg0, REG(B7_4));
794 arg1 = tcg_temp_new();
795 tcg_gen_ext16s_i32(arg1, REG(B11_8));
796 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
797 tcg_temp_free(arg1);
798 tcg_temp_free(arg0);
800 return;
801 case 0x200e: /* mulu.w Rm,Rn */
803 TCGv arg0, arg1;
804 arg0 = tcg_temp_new();
805 tcg_gen_ext16u_i32(arg0, REG(B7_4));
806 arg1 = tcg_temp_new();
807 tcg_gen_ext16u_i32(arg1, REG(B11_8));
808 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
809 tcg_temp_free(arg1);
810 tcg_temp_free(arg0);
812 return;
813 case 0x600b: /* neg Rm,Rn */
814 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
815 return;
816 case 0x600a: /* negc Rm,Rn */
818 TCGv t0 = tcg_const_i32(0);
819 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
820 REG(B7_4), t0, cpu_sr_t, t0);
821 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
822 t0, t0, REG(B11_8), cpu_sr_t);
823 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
824 tcg_temp_free(t0);
826 return;
827 case 0x6007: /* not Rm,Rn */
828 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
829 return;
830 case 0x200b: /* or Rm,Rn */
831 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
832 return;
833 case 0x400c: /* shad Rm,Rn */
835 TCGv t0 = tcg_temp_new();
836 TCGv t1 = tcg_temp_new();
837 TCGv t2 = tcg_temp_new();
839 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
841 /* positive case: shift to the left */
842 tcg_gen_shl_i32(t1, REG(B11_8), t0);
844 /* negative case: shift to the right in two steps to
845 correctly handle the -32 case */
846 tcg_gen_xori_i32(t0, t0, 0x1f);
847 tcg_gen_sar_i32(t2, REG(B11_8), t0);
848 tcg_gen_sari_i32(t2, t2, 1);
850 /* select between the two cases */
851 tcg_gen_movi_i32(t0, 0);
852 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
854 tcg_temp_free(t0);
855 tcg_temp_free(t1);
856 tcg_temp_free(t2);
858 return;
859 case 0x400d: /* shld Rm,Rn */
861 TCGv t0 = tcg_temp_new();
862 TCGv t1 = tcg_temp_new();
863 TCGv t2 = tcg_temp_new();
865 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
867 /* positive case: shift to the left */
868 tcg_gen_shl_i32(t1, REG(B11_8), t0);
870 /* negative case: shift to the right in two steps to
871 correctly handle the -32 case */
872 tcg_gen_xori_i32(t0, t0, 0x1f);
873 tcg_gen_shr_i32(t2, REG(B11_8), t0);
874 tcg_gen_shri_i32(t2, t2, 1);
876 /* select between the two cases */
877 tcg_gen_movi_i32(t0, 0);
878 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
880 tcg_temp_free(t0);
881 tcg_temp_free(t1);
882 tcg_temp_free(t2);
884 return;
885 case 0x3008: /* sub Rm,Rn */
886 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
887 return;
888 case 0x300a: /* subc Rm,Rn */
890 TCGv t0, t1;
891 t0 = tcg_const_tl(0);
892 t1 = tcg_temp_new();
893 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
894 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
895 REG(B11_8), t0, t1, cpu_sr_t);
896 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
897 tcg_temp_free(t0);
898 tcg_temp_free(t1);
900 return;
901 case 0x300b: /* subv Rm,Rn */
903 TCGv t0, t1, t2;
904 t0 = tcg_temp_new();
905 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
906 t1 = tcg_temp_new();
907 tcg_gen_xor_i32(t1, t0, REG(B7_4));
908 t2 = tcg_temp_new();
909 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
910 tcg_gen_and_i32(t1, t1, t2);
911 tcg_temp_free(t2);
912 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
913 tcg_temp_free(t1);
914 tcg_gen_mov_i32(REG(B11_8), t0);
915 tcg_temp_free(t0);
917 return;
918 case 0x2008: /* tst Rm,Rn */
920 TCGv val = tcg_temp_new();
921 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
922 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
923 tcg_temp_free(val);
925 return;
926 case 0x200a: /* xor Rm,Rn */
927 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
928 return;
929 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
930 CHECK_FPU_ENABLED
931 if (ctx->flags & FPSCR_SZ) {
932 TCGv_i64 fp = tcg_temp_new_i64();
933 gen_load_fpr64(fp, XREG(B7_4));
934 gen_store_fpr64(fp, XREG(B11_8));
935 tcg_temp_free_i64(fp);
936 } else {
937 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
939 return;
940 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
941 CHECK_FPU_ENABLED
942 if (ctx->flags & FPSCR_SZ) {
943 TCGv addr_hi = tcg_temp_new();
944 int fr = XREG(B7_4);
945 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
946 tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
947 ctx->memidx, MO_TEUL);
948 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
949 ctx->memidx, MO_TEUL);
950 tcg_temp_free(addr_hi);
951 } else {
952 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
953 ctx->memidx, MO_TEUL);
955 return;
956 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
957 CHECK_FPU_ENABLED
958 if (ctx->flags & FPSCR_SZ) {
959 TCGv addr_hi = tcg_temp_new();
960 int fr = XREG(B11_8);
961 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
962 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
963 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
964 tcg_temp_free(addr_hi);
965 } else {
966 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
967 ctx->memidx, MO_TEUL);
969 return;
970 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
971 CHECK_FPU_ENABLED
972 if (ctx->flags & FPSCR_SZ) {
973 TCGv addr_hi = tcg_temp_new();
974 int fr = XREG(B11_8);
975 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
976 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
977 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
978 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
979 tcg_temp_free(addr_hi);
980 } else {
981 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
982 ctx->memidx, MO_TEUL);
983 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
985 return;
986 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
987 CHECK_FPU_ENABLED
988 TCGv addr = tcg_temp_new_i32();
989 tcg_gen_subi_i32(addr, REG(B11_8), 4);
990 if (ctx->flags & FPSCR_SZ) {
991 int fr = XREG(B7_4);
992 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
993 tcg_gen_subi_i32(addr, addr, 4);
994 tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
995 } else {
996 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
997 ctx->memidx, MO_TEUL);
999 tcg_gen_mov_i32(REG(B11_8), addr);
1000 tcg_temp_free(addr);
1001 return;
1002 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1003 CHECK_FPU_ENABLED
1005 TCGv addr = tcg_temp_new_i32();
1006 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1007 if (ctx->flags & FPSCR_SZ) {
1008 int fr = XREG(B11_8);
1009 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1010 ctx->memidx, MO_TEUL);
1011 tcg_gen_addi_i32(addr, addr, 4);
1012 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1013 ctx->memidx, MO_TEUL);
1014 } else {
1015 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
1016 ctx->memidx, MO_TEUL);
1018 tcg_temp_free(addr);
1020 return;
1021 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1022 CHECK_FPU_ENABLED
1024 TCGv addr = tcg_temp_new();
1025 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1026 if (ctx->flags & FPSCR_SZ) {
1027 int fr = XREG(B7_4);
1028 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1029 ctx->memidx, MO_TEUL);
1030 tcg_gen_addi_i32(addr, addr, 4);
1031 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1032 ctx->memidx, MO_TEUL);
1033 } else {
1034 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1035 ctx->memidx, MO_TEUL);
1037 tcg_temp_free(addr);
1039 return;
1040 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1041 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1042 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1043 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1044 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1045 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1047 CHECK_FPU_ENABLED
1048 if (ctx->flags & FPSCR_PR) {
1049 TCGv_i64 fp0, fp1;
1051 if (ctx->opcode & 0x0110)
1052 break; /* illegal instruction */
1053 fp0 = tcg_temp_new_i64();
1054 fp1 = tcg_temp_new_i64();
1055 gen_load_fpr64(fp0, DREG(B11_8));
1056 gen_load_fpr64(fp1, DREG(B7_4));
1057 switch (ctx->opcode & 0xf00f) {
1058 case 0xf000: /* fadd Rm,Rn */
1059 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1060 break;
1061 case 0xf001: /* fsub Rm,Rn */
1062 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1063 break;
1064 case 0xf002: /* fmul Rm,Rn */
1065 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1066 break;
1067 case 0xf003: /* fdiv Rm,Rn */
1068 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1069 break;
1070 case 0xf004: /* fcmp/eq Rm,Rn */
1071 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1072 return;
1073 case 0xf005: /* fcmp/gt Rm,Rn */
1074 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1075 return;
1077 gen_store_fpr64(fp0, DREG(B11_8));
1078 tcg_temp_free_i64(fp0);
1079 tcg_temp_free_i64(fp1);
1080 } else {
1081 switch (ctx->opcode & 0xf00f) {
1082 case 0xf000: /* fadd Rm,Rn */
1083 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1084 cpu_fregs[FREG(B11_8)],
1085 cpu_fregs[FREG(B7_4)]);
1086 break;
1087 case 0xf001: /* fsub Rm,Rn */
1088 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1089 cpu_fregs[FREG(B11_8)],
1090 cpu_fregs[FREG(B7_4)]);
1091 break;
1092 case 0xf002: /* fmul Rm,Rn */
1093 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1094 cpu_fregs[FREG(B11_8)],
1095 cpu_fregs[FREG(B7_4)]);
1096 break;
1097 case 0xf003: /* fdiv Rm,Rn */
1098 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1099 cpu_fregs[FREG(B11_8)],
1100 cpu_fregs[FREG(B7_4)]);
1101 break;
1102 case 0xf004: /* fcmp/eq Rm,Rn */
1103 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1104 cpu_fregs[FREG(B7_4)]);
1105 return;
1106 case 0xf005: /* fcmp/gt Rm,Rn */
1107 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1108 cpu_fregs[FREG(B7_4)]);
1109 return;
1113 return;
1114 case 0xf00e: /* fmac FR0,RM,Rn */
1116 CHECK_FPU_ENABLED
1117 if (ctx->flags & FPSCR_PR) {
1118 break; /* illegal instruction */
1119 } else {
1120 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1121 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1122 cpu_fregs[FREG(B11_8)]);
1123 return;
1128 switch (ctx->opcode & 0xff00) {
1129 case 0xc900: /* and #imm,R0 */
1130 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1131 return;
1132 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1134 TCGv addr, val;
1135 addr = tcg_temp_new();
1136 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1137 val = tcg_temp_new();
1138 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1139 tcg_gen_andi_i32(val, val, B7_0);
1140 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1141 tcg_temp_free(val);
1142 tcg_temp_free(addr);
1144 return;
1145 case 0x8b00: /* bf label */
1146 CHECK_NOT_DELAY_SLOT
1147 gen_conditional_jump(ctx, ctx->pc + 2,
1148 ctx->pc + 4 + B7_0s * 2);
1149 ctx->bstate = BS_BRANCH;
1150 return;
1151 case 0x8f00: /* bf/s label */
1152 CHECK_NOT_DELAY_SLOT
1153 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1154 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1155 return;
1156 case 0x8900: /* bt label */
1157 CHECK_NOT_DELAY_SLOT
1158 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1159 ctx->pc + 2);
1160 ctx->bstate = BS_BRANCH;
1161 return;
1162 case 0x8d00: /* bt/s label */
1163 CHECK_NOT_DELAY_SLOT
1164 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1165 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1166 return;
1167 case 0x8800: /* cmp/eq #imm,R0 */
1168 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1169 return;
1170 case 0xc400: /* mov.b @(disp,GBR),R0 */
1172 TCGv addr = tcg_temp_new();
1173 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1174 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1175 tcg_temp_free(addr);
1177 return;
1178 case 0xc500: /* mov.w @(disp,GBR),R0 */
1180 TCGv addr = tcg_temp_new();
1181 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1182 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1183 tcg_temp_free(addr);
1185 return;
1186 case 0xc600: /* mov.l @(disp,GBR),R0 */
1188 TCGv addr = tcg_temp_new();
1189 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1190 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1191 tcg_temp_free(addr);
1193 return;
1194 case 0xc000: /* mov.b R0,@(disp,GBR) */
1196 TCGv addr = tcg_temp_new();
1197 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1198 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1199 tcg_temp_free(addr);
1201 return;
1202 case 0xc100: /* mov.w R0,@(disp,GBR) */
1204 TCGv addr = tcg_temp_new();
1205 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1206 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1207 tcg_temp_free(addr);
1209 return;
1210 case 0xc200: /* mov.l R0,@(disp,GBR) */
1212 TCGv addr = tcg_temp_new();
1213 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1214 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1215 tcg_temp_free(addr);
1217 return;
1218 case 0x8000: /* mov.b R0,@(disp,Rn) */
1220 TCGv addr = tcg_temp_new();
1221 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1222 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1223 tcg_temp_free(addr);
1225 return;
1226 case 0x8100: /* mov.w R0,@(disp,Rn) */
1228 TCGv addr = tcg_temp_new();
1229 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1230 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1231 tcg_temp_free(addr);
1233 return;
1234 case 0x8400: /* mov.b @(disp,Rn),R0 */
1236 TCGv addr = tcg_temp_new();
1237 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1238 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1239 tcg_temp_free(addr);
1241 return;
1242 case 0x8500: /* mov.w @(disp,Rn),R0 */
1244 TCGv addr = tcg_temp_new();
1245 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1246 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1247 tcg_temp_free(addr);
1249 return;
1250 case 0xc700: /* mova @(disp,PC),R0 */
1251 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1252 return;
1253 case 0xcb00: /* or #imm,R0 */
1254 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1255 return;
1256 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1258 TCGv addr, val;
1259 addr = tcg_temp_new();
1260 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1261 val = tcg_temp_new();
1262 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1263 tcg_gen_ori_i32(val, val, B7_0);
1264 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1265 tcg_temp_free(val);
1266 tcg_temp_free(addr);
1268 return;
1269 case 0xc300: /* trapa #imm */
1271 TCGv imm;
1272 CHECK_NOT_DELAY_SLOT
1273 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1274 imm = tcg_const_i32(B7_0);
1275 gen_helper_trapa(cpu_env, imm);
1276 tcg_temp_free(imm);
1277 ctx->bstate = BS_BRANCH;
1279 return;
1280 case 0xc800: /* tst #imm,R0 */
1282 TCGv val = tcg_temp_new();
1283 tcg_gen_andi_i32(val, REG(0), B7_0);
1284 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1285 tcg_temp_free(val);
1287 return;
1288 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1290 TCGv val = tcg_temp_new();
1291 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1292 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1293 tcg_gen_andi_i32(val, val, B7_0);
1294 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1295 tcg_temp_free(val);
1297 return;
1298 case 0xca00: /* xor #imm,R0 */
1299 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1300 return;
1301 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1303 TCGv addr, val;
1304 addr = tcg_temp_new();
1305 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1306 val = tcg_temp_new();
1307 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1308 tcg_gen_xori_i32(val, val, B7_0);
1309 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1310 tcg_temp_free(val);
1311 tcg_temp_free(addr);
1313 return;
1316 switch (ctx->opcode & 0xf08f) {
1317 case 0x408e: /* ldc Rm,Rn_BANK */
1318 CHECK_PRIVILEGED
1319 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1320 return;
1321 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1322 CHECK_PRIVILEGED
1323 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1324 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1325 return;
1326 case 0x0082: /* stc Rm_BANK,Rn */
1327 CHECK_PRIVILEGED
1328 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1329 return;
1330 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1331 CHECK_PRIVILEGED
1333 TCGv addr = tcg_temp_new();
1334 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1335 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1336 tcg_gen_mov_i32(REG(B11_8), addr);
1337 tcg_temp_free(addr);
1339 return;
1342 switch (ctx->opcode & 0xf0ff) {
1343 case 0x0023: /* braf Rn */
1344 CHECK_NOT_DELAY_SLOT
1345 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1346 ctx->flags |= DELAY_SLOT;
1347 ctx->delayed_pc = (uint32_t) - 1;
1348 return;
1349 case 0x0003: /* bsrf Rn */
1350 CHECK_NOT_DELAY_SLOT
1351 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1352 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1353 ctx->flags |= DELAY_SLOT;
1354 ctx->delayed_pc = (uint32_t) - 1;
1355 return;
1356 case 0x4015: /* cmp/pl Rn */
1357 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1358 return;
1359 case 0x4011: /* cmp/pz Rn */
1360 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1361 return;
1362 case 0x4010: /* dt Rn */
1363 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1364 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1365 return;
1366 case 0x402b: /* jmp @Rn */
1367 CHECK_NOT_DELAY_SLOT
1368 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1369 ctx->flags |= DELAY_SLOT;
1370 ctx->delayed_pc = (uint32_t) - 1;
1371 return;
1372 case 0x400b: /* jsr @Rn */
1373 CHECK_NOT_DELAY_SLOT
1374 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1375 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1376 ctx->flags |= DELAY_SLOT;
1377 ctx->delayed_pc = (uint32_t) - 1;
1378 return;
1379 case 0x400e: /* ldc Rm,SR */
1380 CHECK_PRIVILEGED
1382 TCGv val = tcg_temp_new();
1383 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1384 gen_write_sr(val);
1385 tcg_temp_free(val);
1386 ctx->bstate = BS_STOP;
1388 return;
1389 case 0x4007: /* ldc.l @Rm+,SR */
1390 CHECK_PRIVILEGED
1392 TCGv val = tcg_temp_new();
1393 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1394 tcg_gen_andi_i32(val, val, 0x700083f3);
1395 gen_write_sr(val);
1396 tcg_temp_free(val);
1397 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1398 ctx->bstate = BS_STOP;
1400 return;
1401 case 0x0002: /* stc SR,Rn */
1402 CHECK_PRIVILEGED
1403 gen_read_sr(REG(B11_8));
1404 return;
1405 case 0x4003: /* stc SR,@-Rn */
1406 CHECK_PRIVILEGED
1408 TCGv addr = tcg_temp_new();
1409 TCGv val = tcg_temp_new();
1410 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1411 gen_read_sr(val);
1412 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1413 tcg_gen_mov_i32(REG(B11_8), addr);
1414 tcg_temp_free(val);
1415 tcg_temp_free(addr);
1417 return;
1418 #define LD(reg,ldnum,ldpnum,prechk) \
1419 case ldnum: \
1420 prechk \
1421 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1422 return; \
1423 case ldpnum: \
1424 prechk \
1425 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1426 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1427 return;
1428 #define ST(reg,stnum,stpnum,prechk) \
1429 case stnum: \
1430 prechk \
1431 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1432 return; \
1433 case stpnum: \
1434 prechk \
1436 TCGv addr = tcg_temp_new(); \
1437 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1438 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1439 tcg_gen_mov_i32(REG(B11_8), addr); \
1440 tcg_temp_free(addr); \
1442 return;
1443 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1444 LD(reg,ldnum,ldpnum,prechk) \
1445 ST(reg,stnum,stpnum,prechk)
1446 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1447 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1448 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1449 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1450 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1451 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1452 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1453 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1454 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1455 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1456 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1457 case 0x406a: /* lds Rm,FPSCR */
1458 CHECK_FPU_ENABLED
1459 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1460 ctx->bstate = BS_STOP;
1461 return;
1462 case 0x4066: /* lds.l @Rm+,FPSCR */
1463 CHECK_FPU_ENABLED
1465 TCGv addr = tcg_temp_new();
1466 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1467 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1468 gen_helper_ld_fpscr(cpu_env, addr);
1469 tcg_temp_free(addr);
1470 ctx->bstate = BS_STOP;
1472 return;
1473 case 0x006a: /* sts FPSCR,Rn */
1474 CHECK_FPU_ENABLED
1475 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1476 return;
1477 case 0x4062: /* sts FPSCR,@-Rn */
1478 CHECK_FPU_ENABLED
1480 TCGv addr, val;
1481 val = tcg_temp_new();
1482 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1483 addr = tcg_temp_new();
1484 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1485 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1486 tcg_gen_mov_i32(REG(B11_8), addr);
1487 tcg_temp_free(addr);
1488 tcg_temp_free(val);
1490 return;
1491 case 0x00c3: /* movca.l R0,@Rm */
1493 TCGv val = tcg_temp_new();
1494 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1495 gen_helper_movcal(cpu_env, REG(B11_8), val);
1496 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1498 ctx->has_movcal = 1;
1499 return;
1500 case 0x40a9:
1501 /* MOVUA.L @Rm,R0 (Rm) -> R0
1502 Load non-boundary-aligned data */
1503 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1504 return;
1505 case 0x40e9:
1506 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1507 Load non-boundary-aligned data */
1508 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1509 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1510 return;
1511 case 0x0029: /* movt Rn */
1512 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1513 return;
1514 case 0x0073:
1515 /* MOVCO.L
1516 LDST -> T
1517 If (T == 1) R0 -> (Rn)
1518 0 -> LDST
1520 if (ctx->features & SH_FEATURE_SH4A) {
1521 TCGLabel *label = gen_new_label();
1522 tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
1523 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1524 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1525 gen_set_label(label);
1526 tcg_gen_movi_i32(cpu_ldst, 0);
1527 return;
1528 } else
1529 break;
1530 case 0x0063:
1531 /* MOVLI.L @Rm,R0
1532 1 -> LDST
1533 (Rm) -> R0
1534 When interrupt/exception
1535 occurred 0 -> LDST
1537 if (ctx->features & SH_FEATURE_SH4A) {
1538 tcg_gen_movi_i32(cpu_ldst, 0);
1539 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1540 tcg_gen_movi_i32(cpu_ldst, 1);
1541 return;
1542 } else
1543 break;
1544 case 0x0093: /* ocbi @Rn */
1546 gen_helper_ocbi(cpu_env, REG(B11_8));
1548 return;
1549 case 0x00a3: /* ocbp @Rn */
1550 case 0x00b3: /* ocbwb @Rn */
1551 /* These instructions are supposed to do nothing in case of
1552 a cache miss. Given that we only partially emulate caches
1553 it is safe to simply ignore them. */
1554 return;
1555 case 0x0083: /* pref @Rn */
1556 return;
1557 case 0x00d3: /* prefi @Rn */
1558 if (ctx->features & SH_FEATURE_SH4A)
1559 return;
1560 else
1561 break;
1562 case 0x00e3: /* icbi @Rn */
1563 if (ctx->features & SH_FEATURE_SH4A)
1564 return;
1565 else
1566 break;
1567 case 0x00ab: /* synco */
1568 if (ctx->features & SH_FEATURE_SH4A)
1569 return;
1570 else
1571 break;
1572 case 0x4024: /* rotcl Rn */
1574 TCGv tmp = tcg_temp_new();
1575 tcg_gen_mov_i32(tmp, cpu_sr_t);
1576 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1577 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1578 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1579 tcg_temp_free(tmp);
1581 return;
1582 case 0x4025: /* rotcr Rn */
1584 TCGv tmp = tcg_temp_new();
1585 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1586 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1587 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1588 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1589 tcg_temp_free(tmp);
1591 return;
1592 case 0x4004: /* rotl Rn */
1593 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1594 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1595 return;
1596 case 0x4005: /* rotr Rn */
1597 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1598 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1599 return;
1600 case 0x4000: /* shll Rn */
1601 case 0x4020: /* shal Rn */
1602 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1603 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1604 return;
1605 case 0x4021: /* shar Rn */
1606 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1607 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1608 return;
1609 case 0x4001: /* shlr Rn */
1610 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1611 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1612 return;
1613 case 0x4008: /* shll2 Rn */
1614 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1615 return;
1616 case 0x4018: /* shll8 Rn */
1617 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1618 return;
1619 case 0x4028: /* shll16 Rn */
1620 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1621 return;
1622 case 0x4009: /* shlr2 Rn */
1623 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1624 return;
1625 case 0x4019: /* shlr8 Rn */
1626 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1627 return;
1628 case 0x4029: /* shlr16 Rn */
1629 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1630 return;
1631 case 0x401b: /* tas.b @Rn */
1633 TCGv addr, val;
1634 addr = tcg_temp_local_new();
1635 tcg_gen_mov_i32(addr, REG(B11_8));
1636 val = tcg_temp_local_new();
1637 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1638 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1639 tcg_gen_ori_i32(val, val, 0x80);
1640 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1641 tcg_temp_free(val);
1642 tcg_temp_free(addr);
1644 return;
1645 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1646 CHECK_FPU_ENABLED
1647 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1648 return;
1649 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1650 CHECK_FPU_ENABLED
1651 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1652 return;
1653 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1654 CHECK_FPU_ENABLED
1655 if (ctx->flags & FPSCR_PR) {
1656 TCGv_i64 fp;
1657 if (ctx->opcode & 0x0100)
1658 break; /* illegal instruction */
1659 fp = tcg_temp_new_i64();
1660 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1661 gen_store_fpr64(fp, DREG(B11_8));
1662 tcg_temp_free_i64(fp);
1664 else {
1665 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1667 return;
1668 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1669 CHECK_FPU_ENABLED
1670 if (ctx->flags & FPSCR_PR) {
1671 TCGv_i64 fp;
1672 if (ctx->opcode & 0x0100)
1673 break; /* illegal instruction */
1674 fp = tcg_temp_new_i64();
1675 gen_load_fpr64(fp, DREG(B11_8));
1676 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1677 tcg_temp_free_i64(fp);
1679 else {
1680 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1682 return;
1683 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1684 CHECK_FPU_ENABLED
1686 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1688 return;
1689 case 0xf05d: /* fabs FRn/DRn */
1690 CHECK_FPU_ENABLED
1691 if (ctx->flags & FPSCR_PR) {
1692 if (ctx->opcode & 0x0100)
1693 break; /* illegal instruction */
1694 TCGv_i64 fp = tcg_temp_new_i64();
1695 gen_load_fpr64(fp, DREG(B11_8));
1696 gen_helper_fabs_DT(fp, fp);
1697 gen_store_fpr64(fp, DREG(B11_8));
1698 tcg_temp_free_i64(fp);
1699 } else {
1700 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1702 return;
1703 case 0xf06d: /* fsqrt FRn */
1704 CHECK_FPU_ENABLED
1705 if (ctx->flags & FPSCR_PR) {
1706 if (ctx->opcode & 0x0100)
1707 break; /* illegal instruction */
1708 TCGv_i64 fp = tcg_temp_new_i64();
1709 gen_load_fpr64(fp, DREG(B11_8));
1710 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1711 gen_store_fpr64(fp, DREG(B11_8));
1712 tcg_temp_free_i64(fp);
1713 } else {
1714 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1715 cpu_fregs[FREG(B11_8)]);
1717 return;
1718 case 0xf07d: /* fsrra FRn */
1719 CHECK_FPU_ENABLED
1720 break;
1721 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1722 CHECK_FPU_ENABLED
1723 if (!(ctx->flags & FPSCR_PR)) {
1724 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1726 return;
1727 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1728 CHECK_FPU_ENABLED
1729 if (!(ctx->flags & FPSCR_PR)) {
1730 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1732 return;
1733 case 0xf0ad: /* fcnvsd FPUL,DRn */
1734 CHECK_FPU_ENABLED
1736 TCGv_i64 fp = tcg_temp_new_i64();
1737 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1738 gen_store_fpr64(fp, DREG(B11_8));
1739 tcg_temp_free_i64(fp);
1741 return;
1742 case 0xf0bd: /* fcnvds DRn,FPUL */
1743 CHECK_FPU_ENABLED
1745 TCGv_i64 fp = tcg_temp_new_i64();
1746 gen_load_fpr64(fp, DREG(B11_8));
1747 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1748 tcg_temp_free_i64(fp);
1750 return;
1751 case 0xf0ed: /* fipr FVm,FVn */
1752 CHECK_FPU_ENABLED
1753 if ((ctx->flags & FPSCR_PR) == 0) {
1754 TCGv m, n;
1755 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1756 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1757 gen_helper_fipr(cpu_env, m, n);
1758 tcg_temp_free(m);
1759 tcg_temp_free(n);
1760 return;
1762 break;
1763 case 0xf0fd: /* ftrv XMTRX,FVn */
1764 CHECK_FPU_ENABLED
1765 if ((ctx->opcode & 0x0300) == 0x0100 &&
1766 (ctx->flags & FPSCR_PR) == 0) {
1767 TCGv n;
1768 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1769 gen_helper_ftrv(cpu_env, n);
1770 tcg_temp_free(n);
1771 return;
1773 break;
1775 #if 0
1776 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1777 ctx->opcode, ctx->pc);
1778 fflush(stderr);
1779 #endif
1780 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1781 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1782 gen_helper_raise_slot_illegal_instruction(cpu_env);
1783 } else {
1784 gen_helper_raise_illegal_instruction(cpu_env);
1786 ctx->bstate = BS_BRANCH;
/* Translate a single guest instruction, wrapping _decode_opc() with the
 * SH4 delay-slot protocol:
 *   - If the PREVIOUS instruction armed a delay slot (old_flags has
 *     DELAY_SLOT or DELAY_SLOT_CONDITIONAL), then the instruction just
 *     decoded WAS the delay slot: clear the slot flags, mark the TB as
 *     ending in a branch, and emit the pending (possibly conditional)
 *     jump now.
 *   - If THIS instruction armed a delay slot, persist the flags to the
 *     CPU state so an exception taken inside the slot sees them.
 */
static void decode_opc(DisasContext * ctx)
{
    /* Snapshot the flags before decoding: _decode_opc() may set or clear
       delay-slot bits, and we must act on the pre-decode state. */
    uint32_t old_flags = ctx->flags;

    _decode_opc(ctx);

    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        /* The instruction we just emitted executed in a delay slot. */
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            gen_store_flags(0);
        } else {
            /* go out of the delay slot */
            uint32_t new_flags = ctx->flags;
            new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
            gen_store_flags(new_flags);
        }
        ctx->flags = 0;
        /* The pending branch terminates this TB. */
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);
        }
    }

    /* go into a delay slot: make the slot flags visible to the CPU state
       so an exception raised by the slot instruction restarts correctly. */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        gen_store_flags(ctx->flags);
}
/* Translate one TranslationBlock: decode guest instructions starting at
 * tb->pc until a branch/exception ends the block, the opcode buffer
 * fills, the page boundary is crossed, or the instruction budget is
 * exhausted; then emit the TB epilogue according to ctx.bstate.
 */
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
    SuperHCPU *cpu = sh_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    /* memidx 0 = privileged, 1 = user, selected by SR.MD. */
    ctx.memidx = (ctx.flags & (1u << SR_MD)) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
        /* Record (pc, flags) so restore_state_to_opc() can rebuild the
           CPU state at this instruction after an exception. */
        tcg_gen_insn_start(ctx.pc, ctx.flags);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            /* We have hit a breakpoint - make sure PC is up-to-date */
            tcg_gen_movi_i32(cpu_pc, ctx.pc);
            gen_helper_debug(cpu_env);
            ctx.bstate = BS_BRANCH;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            ctx.pc += 2;
            break;
        }

        /* For icount mode, bracket the last (possibly I/O) insn. */
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        ctx.pc += 2;
        /* Stop at a page boundary so TB invalidation stays per-page. */
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (cs->singlestep_enabled) {
            break;
        }
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (cs->singlestep_enabled) {
        /* Single-stepping: trap back to the debugger after each TB. */
        tcg_gen_movi_i32(cpu_pc, ctx.pc);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
            /* gen_op_interrupt_restart(); */
            /* fall through */
        case BS_NONE:
            /* Fell off the end without a branch: flush flags and chain
               to the next TB at ctx.pc. */
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            }
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* gen_op_interrupt_restart(); */
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            /* Branch already emitted its own TB exit. */
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
/* Rebuild CPU state at an exception point from the per-insn data that
 * gen_intermediate_code() recorded with tcg_gen_insn_start(pc, flags):
 * data[0] is the guest PC, data[1] the translation flags.
 */
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->flags = data[1];
}