Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20160517' into staging
[qemu/ar7.git] / target-sh4 / translate.c
blob53f782c05467f17e24e09119abc9054e4599d5df
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #define DEBUG_DISAS
22 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
32 #include "exec/log.h"
35 typedef struct DisasContext {
36 struct TranslationBlock *tb;
37 target_ulong pc;
38 uint16_t opcode;
39 uint32_t flags;
40 int bstate;
41 int memidx;
42 uint32_t delayed_pc;
43 int singlestep_enabled;
44 uint32_t features;
45 int has_movcal;
46 } DisasContext;
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
/* CPU is in user mode when SR.MD is clear. */
#define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
#endif
/* Values for DisasContext::bstate. */
enum {
    BS_NONE   = 0, /* TB ends without reaching a branch or exception */
    BS_STOP   = 1, /* we want to stop translation for any reason */
    BS_BRANCH = 2, /* we reached a branch condition */
    BS_EXCP   = 3, /* we reached an exception condition */
};
63 /* global register indexes */
64 static TCGv_env cpu_env;
65 static TCGv cpu_gregs[24];
66 static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
67 static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
68 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
69 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
70 static TCGv cpu_fregs[32];
72 /* internal register indexes */
73 static TCGv cpu_flags, cpu_delayed_pc;
75 #include "exec/gen-icount.h"
77 void sh4_translate_init(void)
79 int i;
80 static int done_init = 0;
81 static const char * const gregnames[24] = {
82 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
83 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
84 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
85 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
86 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
88 static const char * const fregnames[32] = {
89 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
90 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
91 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
92 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
93 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
94 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
95 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
96 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
99 if (done_init)
100 return;
102 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
104 for (i = 0; i < 24; i++)
105 cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
106 offsetof(CPUSH4State, gregs[i]),
107 gregnames[i]);
109 cpu_pc = tcg_global_mem_new_i32(cpu_env,
110 offsetof(CPUSH4State, pc), "PC");
111 cpu_sr = tcg_global_mem_new_i32(cpu_env,
112 offsetof(CPUSH4State, sr), "SR");
113 cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
114 offsetof(CPUSH4State, sr_m), "SR_M");
115 cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
116 offsetof(CPUSH4State, sr_q), "SR_Q");
117 cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
118 offsetof(CPUSH4State, sr_t), "SR_T");
119 cpu_ssr = tcg_global_mem_new_i32(cpu_env,
120 offsetof(CPUSH4State, ssr), "SSR");
121 cpu_spc = tcg_global_mem_new_i32(cpu_env,
122 offsetof(CPUSH4State, spc), "SPC");
123 cpu_gbr = tcg_global_mem_new_i32(cpu_env,
124 offsetof(CPUSH4State, gbr), "GBR");
125 cpu_vbr = tcg_global_mem_new_i32(cpu_env,
126 offsetof(CPUSH4State, vbr), "VBR");
127 cpu_sgr = tcg_global_mem_new_i32(cpu_env,
128 offsetof(CPUSH4State, sgr), "SGR");
129 cpu_dbr = tcg_global_mem_new_i32(cpu_env,
130 offsetof(CPUSH4State, dbr), "DBR");
131 cpu_mach = tcg_global_mem_new_i32(cpu_env,
132 offsetof(CPUSH4State, mach), "MACH");
133 cpu_macl = tcg_global_mem_new_i32(cpu_env,
134 offsetof(CPUSH4State, macl), "MACL");
135 cpu_pr = tcg_global_mem_new_i32(cpu_env,
136 offsetof(CPUSH4State, pr), "PR");
137 cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
138 offsetof(CPUSH4State, fpscr), "FPSCR");
139 cpu_fpul = tcg_global_mem_new_i32(cpu_env,
140 offsetof(CPUSH4State, fpul), "FPUL");
142 cpu_flags = tcg_global_mem_new_i32(cpu_env,
143 offsetof(CPUSH4State, flags), "_flags_");
144 cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
145 offsetof(CPUSH4State, delayed_pc),
146 "_delayed_pc_");
147 cpu_ldst = tcg_global_mem_new_i32(cpu_env,
148 offsetof(CPUSH4State, ldst), "_ldst_");
150 for (i = 0; i < 32; i++)
151 cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
152 offsetof(CPUSH4State, fregs[i]),
153 fregnames[i]);
155 done_init = 1;
158 void superh_cpu_dump_state(CPUState *cs, FILE *f,
159 fprintf_function cpu_fprintf, int flags)
161 SuperHCPU *cpu = SUPERH_CPU(cs);
162 CPUSH4State *env = &cpu->env;
163 int i;
164 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
165 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
166 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
167 env->spc, env->ssr, env->gbr, env->vbr);
168 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
169 env->sgr, env->dbr, env->delayed_pc, env->fpul);
170 for (i = 0; i < 24; i += 4) {
171 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
172 i, env->gregs[i], i + 1, env->gregs[i + 1],
173 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
175 if (env->flags & DELAY_SLOT) {
176 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
177 env->delayed_pc);
178 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
179 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
180 env->delayed_pc);
184 static void gen_read_sr(TCGv dst)
186 TCGv t0 = tcg_temp_new();
187 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
188 tcg_gen_or_i32(dst, dst, t0);
189 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
190 tcg_gen_or_i32(dst, dst, t0);
191 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
192 tcg_gen_or_i32(dst, cpu_sr, t0);
193 tcg_temp_free_i32(t0);
196 static void gen_write_sr(TCGv src)
198 tcg_gen_andi_i32(cpu_sr, src,
199 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
200 tcg_gen_shri_i32(cpu_sr_q, src, SR_Q);
201 tcg_gen_andi_i32(cpu_sr_q, cpu_sr_q, 1);
202 tcg_gen_shri_i32(cpu_sr_m, src, SR_M);
203 tcg_gen_andi_i32(cpu_sr_m, cpu_sr_m, 1);
204 tcg_gen_shri_i32(cpu_sr_t, src, SR_T);
205 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
208 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
210 if (unlikely(ctx->singlestep_enabled)) {
211 return false;
214 #ifndef CONFIG_USER_ONLY
215 return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
216 #else
217 return true;
218 #endif
221 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
223 if (use_goto_tb(ctx, dest)) {
224 /* Use a direct jump if in same page and singlestep not enabled */
225 tcg_gen_goto_tb(n);
226 tcg_gen_movi_i32(cpu_pc, dest);
227 tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
228 } else {
229 tcg_gen_movi_i32(cpu_pc, dest);
230 if (ctx->singlestep_enabled)
231 gen_helper_debug(cpu_env);
232 tcg_gen_exit_tb(0);
236 static void gen_jump(DisasContext * ctx)
238 if (ctx->delayed_pc == (uint32_t) - 1) {
239 /* Target is not statically known, it comes necessarily from a
240 delayed jump as immediate jump are conditinal jumps */
241 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
242 if (ctx->singlestep_enabled)
243 gen_helper_debug(cpu_env);
244 tcg_gen_exit_tb(0);
245 } else {
246 gen_goto_tb(ctx, 0, ctx->delayed_pc);
250 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
252 TCGLabel *label = gen_new_label();
253 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
254 tcg_gen_brcondi_i32(t ? TCG_COND_EQ : TCG_COND_NE, cpu_sr_t, 0, label);
255 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
256 gen_set_label(label);
259 /* Immediate conditional jump (bt or bf) */
260 static void gen_conditional_jump(DisasContext * ctx,
261 target_ulong ift, target_ulong ifnott)
263 TCGLabel *l1 = gen_new_label();
264 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1);
265 gen_goto_tb(ctx, 0, ifnott);
266 gen_set_label(l1);
267 gen_goto_tb(ctx, 1, ift);
270 /* Delayed conditional jump (bt or bf) */
271 static void gen_delayed_conditional_jump(DisasContext * ctx)
273 TCGLabel *l1;
274 TCGv ds;
276 l1 = gen_new_label();
277 ds = tcg_temp_new();
278 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
279 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
280 gen_goto_tb(ctx, 1, ctx->pc + 2);
281 gen_set_label(l1);
282 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
283 gen_jump(ctx);
286 static inline void gen_store_flags(uint32_t flags)
288 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
289 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
292 static inline void gen_load_fpr64(TCGv_i64 t, int reg)
294 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
297 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
299 TCGv_i32 tmp = tcg_temp_new_i32();
300 tcg_gen_extrl_i64_i32(tmp, t);
301 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
302 tcg_gen_shri_i64(t, t, 32);
303 tcg_gen_extrl_i64_i32(tmp, t);
304 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
305 tcg_temp_free_i32(tmp);
/* Opcode bit-field extraction helpers (suffix 's' = sign-extended). */
#define B3_0   (ctx->opcode & 0xf)
#define B6_4   ((ctx->opcode >> 4) & 0x7)
#define B7_4   ((ctx->opcode >> 4) & 0xf)
#define B7_0   (ctx->opcode & 0xff)
#define B7_0s  ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8  ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* R0..R7 are banked: REG() picks the bank selected by SR.MD/SR.RB,
   ALTREG() picks the opposite bank; R8..R15 are unbanked. */
#define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
                && (ctx->flags & (1u << SR_RB))\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
                   || !(ctx->flags & (1u << SR_RB)))\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection honouring the FPSCR.FR bank swap; XREG remaps
   the even/odd pairing used by paired single-precision moves. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise slot-illegal and stop translation if the current instruction
   is not permitted inside a delay slot. */
#define CHECK_NOT_DELAY_SLOT                                          \
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {         \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                            \
        gen_helper_raise_slot_illegal_instruction(cpu_env);           \
        ctx->bstate = BS_BRANCH;                                      \
        return;                                                       \
    }

/* Raise illegal-instruction (slot variant inside a delay slot) and
   stop translation unless the CPU is in privileged mode. */
#define CHECK_PRIVILEGED                                              \
    if (IS_USER(ctx)) {                                               \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                            \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
            gen_helper_raise_slot_illegal_instruction(cpu_env);       \
        } else {                                                      \
            gen_helper_raise_illegal_instruction(cpu_env);            \
        }                                                             \
        ctx->bstate = BS_BRANCH;                                      \
        return;                                                       \
    }

/* Raise FPU-disable (slot variant inside a delay slot) and stop
   translation when SR.FD says the FPU is disabled. */
#define CHECK_FPU_ENABLED                                             \
    if (ctx->flags & (1u << SR_FD)) {                                 \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                            \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
            gen_helper_raise_slot_fpu_disable(cpu_env);               \
        } else {                                                      \
            gen_helper_raise_fpu_disable(cpu_env);                    \
        }                                                             \
        ctx->bstate = BS_BRANCH;                                      \
        return;                                                       \
    }
364 static void _decode_opc(DisasContext * ctx)
366 /* This code tries to make movcal emulation sufficiently
367 accurate for Linux purposes. This instruction writes
368 memory, and prior to that, always allocates a cache line.
369 It is used in two contexts:
370 - in memcpy, where data is copied in blocks, the first write
371 of to a block uses movca.l for performance.
372 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
373 to flush the cache. Here, the data written by movcal.l is never
374 written to memory, and the data written is just bogus.
376 To simulate this, we simulate movcal.l, we store the value to memory,
377 but we also remember the previous content. If we see ocbi, we check
378 if movcal.l for that address was done previously. If so, the write should
379 not have hit the memory, so we restore the previous content.
380 When we see an instruction that is neither movca.l
381 nor ocbi, the previous content is discarded.
383 To optimize, we only try to flush stores when we're at the start of
384 TB, or if we already saw movca.l in this TB and did not flush stores
385 yet. */
386 if (ctx->has_movcal)
388 int opcode = ctx->opcode & 0xf0ff;
389 if (opcode != 0x0093 /* ocbi */
390 && opcode != 0x00c3 /* movca.l */)
392 gen_helper_discard_movcal_backup(cpu_env);
393 ctx->has_movcal = 0;
397 #if 0
398 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
399 #endif
401 switch (ctx->opcode) {
402 case 0x0019: /* div0u */
403 tcg_gen_movi_i32(cpu_sr_m, 0);
404 tcg_gen_movi_i32(cpu_sr_q, 0);
405 tcg_gen_movi_i32(cpu_sr_t, 0);
406 return;
407 case 0x000b: /* rts */
408 CHECK_NOT_DELAY_SLOT
409 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
410 ctx->flags |= DELAY_SLOT;
411 ctx->delayed_pc = (uint32_t) - 1;
412 return;
413 case 0x0028: /* clrmac */
414 tcg_gen_movi_i32(cpu_mach, 0);
415 tcg_gen_movi_i32(cpu_macl, 0);
416 return;
417 case 0x0048: /* clrs */
418 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
419 return;
420 case 0x0008: /* clrt */
421 tcg_gen_movi_i32(cpu_sr_t, 0);
422 return;
423 case 0x0038: /* ldtlb */
424 CHECK_PRIVILEGED
425 gen_helper_ldtlb(cpu_env);
426 return;
427 case 0x002b: /* rte */
428 CHECK_PRIVILEGED
429 CHECK_NOT_DELAY_SLOT
430 gen_write_sr(cpu_ssr);
431 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
432 ctx->flags |= DELAY_SLOT;
433 ctx->delayed_pc = (uint32_t) - 1;
434 return;
435 case 0x0058: /* sets */
436 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
437 return;
438 case 0x0018: /* sett */
439 tcg_gen_movi_i32(cpu_sr_t, 1);
440 return;
441 case 0xfbfd: /* frchg */
442 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
443 ctx->bstate = BS_STOP;
444 return;
445 case 0xf3fd: /* fschg */
446 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
447 ctx->bstate = BS_STOP;
448 return;
449 case 0x0009: /* nop */
450 return;
451 case 0x001b: /* sleep */
452 CHECK_PRIVILEGED
453 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
454 gen_helper_sleep(cpu_env);
455 return;
458 switch (ctx->opcode & 0xf000) {
459 case 0x1000: /* mov.l Rm,@(disp,Rn) */
461 TCGv addr = tcg_temp_new();
462 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
463 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
464 tcg_temp_free(addr);
466 return;
467 case 0x5000: /* mov.l @(disp,Rm),Rn */
469 TCGv addr = tcg_temp_new();
470 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
471 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
472 tcg_temp_free(addr);
474 return;
475 case 0xe000: /* mov #imm,Rn */
476 tcg_gen_movi_i32(REG(B11_8), B7_0s);
477 return;
478 case 0x9000: /* mov.w @(disp,PC),Rn */
480 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
481 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
482 tcg_temp_free(addr);
484 return;
485 case 0xd000: /* mov.l @(disp,PC),Rn */
487 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
488 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
489 tcg_temp_free(addr);
491 return;
492 case 0x7000: /* add #imm,Rn */
493 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
494 return;
495 case 0xa000: /* bra disp */
496 CHECK_NOT_DELAY_SLOT
497 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
498 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
499 ctx->flags |= DELAY_SLOT;
500 return;
501 case 0xb000: /* bsr disp */
502 CHECK_NOT_DELAY_SLOT
503 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
504 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
505 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
506 ctx->flags |= DELAY_SLOT;
507 return;
510 switch (ctx->opcode & 0xf00f) {
511 case 0x6003: /* mov Rm,Rn */
512 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
513 return;
514 case 0x2000: /* mov.b Rm,@Rn */
515 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
516 return;
517 case 0x2001: /* mov.w Rm,@Rn */
518 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
519 return;
520 case 0x2002: /* mov.l Rm,@Rn */
521 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
522 return;
523 case 0x6000: /* mov.b @Rm,Rn */
524 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
525 return;
526 case 0x6001: /* mov.w @Rm,Rn */
527 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
528 return;
529 case 0x6002: /* mov.l @Rm,Rn */
530 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
531 return;
532 case 0x2004: /* mov.b Rm,@-Rn */
534 TCGv addr = tcg_temp_new();
535 tcg_gen_subi_i32(addr, REG(B11_8), 1);
536 /* might cause re-execution */
537 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
538 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
539 tcg_temp_free(addr);
541 return;
542 case 0x2005: /* mov.w Rm,@-Rn */
544 TCGv addr = tcg_temp_new();
545 tcg_gen_subi_i32(addr, REG(B11_8), 2);
546 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
547 tcg_gen_mov_i32(REG(B11_8), addr);
548 tcg_temp_free(addr);
550 return;
551 case 0x2006: /* mov.l Rm,@-Rn */
553 TCGv addr = tcg_temp_new();
554 tcg_gen_subi_i32(addr, REG(B11_8), 4);
555 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
556 tcg_gen_mov_i32(REG(B11_8), addr);
558 return;
559 case 0x6004: /* mov.b @Rm+,Rn */
560 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
561 if ( B11_8 != B7_4 )
562 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
563 return;
564 case 0x6005: /* mov.w @Rm+,Rn */
565 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
566 if ( B11_8 != B7_4 )
567 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
568 return;
569 case 0x6006: /* mov.l @Rm+,Rn */
570 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
571 if ( B11_8 != B7_4 )
572 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
573 return;
574 case 0x0004: /* mov.b Rm,@(R0,Rn) */
576 TCGv addr = tcg_temp_new();
577 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
578 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
579 tcg_temp_free(addr);
581 return;
582 case 0x0005: /* mov.w Rm,@(R0,Rn) */
584 TCGv addr = tcg_temp_new();
585 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
586 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
587 tcg_temp_free(addr);
589 return;
590 case 0x0006: /* mov.l Rm,@(R0,Rn) */
592 TCGv addr = tcg_temp_new();
593 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
594 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
595 tcg_temp_free(addr);
597 return;
598 case 0x000c: /* mov.b @(R0,Rm),Rn */
600 TCGv addr = tcg_temp_new();
601 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
602 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
603 tcg_temp_free(addr);
605 return;
606 case 0x000d: /* mov.w @(R0,Rm),Rn */
608 TCGv addr = tcg_temp_new();
609 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
610 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
611 tcg_temp_free(addr);
613 return;
614 case 0x000e: /* mov.l @(R0,Rm),Rn */
616 TCGv addr = tcg_temp_new();
617 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
618 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
619 tcg_temp_free(addr);
621 return;
622 case 0x6008: /* swap.b Rm,Rn */
624 TCGv low = tcg_temp_new();;
625 tcg_gen_ext16u_i32(low, REG(B7_4));
626 tcg_gen_bswap16_i32(low, low);
627 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
628 tcg_temp_free(low);
630 return;
631 case 0x6009: /* swap.w Rm,Rn */
632 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
633 return;
634 case 0x200d: /* xtrct Rm,Rn */
636 TCGv high, low;
637 high = tcg_temp_new();
638 tcg_gen_shli_i32(high, REG(B7_4), 16);
639 low = tcg_temp_new();
640 tcg_gen_shri_i32(low, REG(B11_8), 16);
641 tcg_gen_or_i32(REG(B11_8), high, low);
642 tcg_temp_free(low);
643 tcg_temp_free(high);
645 return;
646 case 0x300c: /* add Rm,Rn */
647 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
648 return;
649 case 0x300e: /* addc Rm,Rn */
651 TCGv t0, t1;
652 t0 = tcg_const_tl(0);
653 t1 = tcg_temp_new();
654 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
655 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
656 REG(B11_8), t0, t1, cpu_sr_t);
657 tcg_temp_free(t0);
658 tcg_temp_free(t1);
660 return;
661 case 0x300f: /* addv Rm,Rn */
663 TCGv t0, t1, t2;
664 t0 = tcg_temp_new();
665 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
666 t1 = tcg_temp_new();
667 tcg_gen_xor_i32(t1, t0, REG(B11_8));
668 t2 = tcg_temp_new();
669 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
670 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
671 tcg_temp_free(t2);
672 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
673 tcg_temp_free(t1);
674 tcg_gen_mov_i32(REG(B7_4), t0);
675 tcg_temp_free(t0);
677 return;
678 case 0x2009: /* and Rm,Rn */
679 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
680 return;
681 case 0x3000: /* cmp/eq Rm,Rn */
682 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
683 return;
684 case 0x3003: /* cmp/ge Rm,Rn */
685 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
686 return;
687 case 0x3007: /* cmp/gt Rm,Rn */
688 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
689 return;
690 case 0x3006: /* cmp/hi Rm,Rn */
691 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
692 return;
693 case 0x3002: /* cmp/hs Rm,Rn */
694 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
695 return;
696 case 0x200c: /* cmp/str Rm,Rn */
698 TCGv cmp1 = tcg_temp_new();
699 TCGv cmp2 = tcg_temp_new();
700 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
701 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
702 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
703 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
704 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
705 tcg_temp_free(cmp2);
706 tcg_temp_free(cmp1);
708 return;
709 case 0x2007: /* div0s Rm,Rn */
710 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
711 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
712 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
713 return;
714 case 0x3004: /* div1 Rm,Rn */
716 TCGv t0 = tcg_temp_new();
717 TCGv t1 = tcg_temp_new();
718 TCGv t2 = tcg_temp_new();
719 TCGv zero = tcg_const_i32(0);
721 /* shift left arg1, saving the bit being pushed out and inserting
722 T on the right */
723 tcg_gen_shri_i32(t0, REG(B11_8), 31);
724 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
725 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
727 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
728 using 64-bit temps, we compute arg0's high part from q ^ m, so
729 that it is 0x00000000 when adding the value or 0xffffffff when
730 subtracting it. */
731 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
732 tcg_gen_subi_i32(t1, t1, 1);
733 tcg_gen_neg_i32(t2, REG(B7_4));
734 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
735 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
737 /* compute T and Q depending on carry */
738 tcg_gen_andi_i32(t1, t1, 1);
739 tcg_gen_xor_i32(t1, t1, t0);
740 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
741 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
743 tcg_temp_free(zero);
744 tcg_temp_free(t2);
745 tcg_temp_free(t1);
746 tcg_temp_free(t0);
748 return;
749 case 0x300d: /* dmuls.l Rm,Rn */
750 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
751 return;
752 case 0x3005: /* dmulu.l Rm,Rn */
753 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
754 return;
755 case 0x600e: /* exts.b Rm,Rn */
756 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
757 return;
758 case 0x600f: /* exts.w Rm,Rn */
759 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
760 return;
761 case 0x600c: /* extu.b Rm,Rn */
762 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
763 return;
764 case 0x600d: /* extu.w Rm,Rn */
765 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
766 return;
767 case 0x000f: /* mac.l @Rm+,@Rn+ */
769 TCGv arg0, arg1;
770 arg0 = tcg_temp_new();
771 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
772 arg1 = tcg_temp_new();
773 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
774 gen_helper_macl(cpu_env, arg0, arg1);
775 tcg_temp_free(arg1);
776 tcg_temp_free(arg0);
777 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
778 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
780 return;
781 case 0x400f: /* mac.w @Rm+,@Rn+ */
783 TCGv arg0, arg1;
784 arg0 = tcg_temp_new();
785 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
786 arg1 = tcg_temp_new();
787 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
788 gen_helper_macw(cpu_env, arg0, arg1);
789 tcg_temp_free(arg1);
790 tcg_temp_free(arg0);
791 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
792 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
794 return;
795 case 0x0007: /* mul.l Rm,Rn */
796 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
797 return;
798 case 0x200f: /* muls.w Rm,Rn */
800 TCGv arg0, arg1;
801 arg0 = tcg_temp_new();
802 tcg_gen_ext16s_i32(arg0, REG(B7_4));
803 arg1 = tcg_temp_new();
804 tcg_gen_ext16s_i32(arg1, REG(B11_8));
805 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
806 tcg_temp_free(arg1);
807 tcg_temp_free(arg0);
809 return;
810 case 0x200e: /* mulu.w Rm,Rn */
812 TCGv arg0, arg1;
813 arg0 = tcg_temp_new();
814 tcg_gen_ext16u_i32(arg0, REG(B7_4));
815 arg1 = tcg_temp_new();
816 tcg_gen_ext16u_i32(arg1, REG(B11_8));
817 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
818 tcg_temp_free(arg1);
819 tcg_temp_free(arg0);
821 return;
822 case 0x600b: /* neg Rm,Rn */
823 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
824 return;
825 case 0x600a: /* negc Rm,Rn */
827 TCGv t0 = tcg_const_i32(0);
828 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
829 REG(B7_4), t0, cpu_sr_t, t0);
830 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
831 t0, t0, REG(B11_8), cpu_sr_t);
832 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
833 tcg_temp_free(t0);
835 return;
836 case 0x6007: /* not Rm,Rn */
837 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
838 return;
839 case 0x200b: /* or Rm,Rn */
840 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
841 return;
842 case 0x400c: /* shad Rm,Rn */
844 TCGv t0 = tcg_temp_new();
845 TCGv t1 = tcg_temp_new();
846 TCGv t2 = tcg_temp_new();
848 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
850 /* positive case: shift to the left */
851 tcg_gen_shl_i32(t1, REG(B11_8), t0);
853 /* negative case: shift to the right in two steps to
854 correctly handle the -32 case */
855 tcg_gen_xori_i32(t0, t0, 0x1f);
856 tcg_gen_sar_i32(t2, REG(B11_8), t0);
857 tcg_gen_sari_i32(t2, t2, 1);
859 /* select between the two cases */
860 tcg_gen_movi_i32(t0, 0);
861 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
863 tcg_temp_free(t0);
864 tcg_temp_free(t1);
865 tcg_temp_free(t2);
867 return;
868 case 0x400d: /* shld Rm,Rn */
870 TCGv t0 = tcg_temp_new();
871 TCGv t1 = tcg_temp_new();
872 TCGv t2 = tcg_temp_new();
874 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
876 /* positive case: shift to the left */
877 tcg_gen_shl_i32(t1, REG(B11_8), t0);
879 /* negative case: shift to the right in two steps to
880 correctly handle the -32 case */
881 tcg_gen_xori_i32(t0, t0, 0x1f);
882 tcg_gen_shr_i32(t2, REG(B11_8), t0);
883 tcg_gen_shri_i32(t2, t2, 1);
885 /* select between the two cases */
886 tcg_gen_movi_i32(t0, 0);
887 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
889 tcg_temp_free(t0);
890 tcg_temp_free(t1);
891 tcg_temp_free(t2);
893 return;
894 case 0x3008: /* sub Rm,Rn */
895 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
896 return;
897 case 0x300a: /* subc Rm,Rn */
899 TCGv t0, t1;
900 t0 = tcg_const_tl(0);
901 t1 = tcg_temp_new();
902 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
903 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
904 REG(B11_8), t0, t1, cpu_sr_t);
905 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
906 tcg_temp_free(t0);
907 tcg_temp_free(t1);
909 return;
910 case 0x300b: /* subv Rm,Rn */
912 TCGv t0, t1, t2;
913 t0 = tcg_temp_new();
914 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
915 t1 = tcg_temp_new();
916 tcg_gen_xor_i32(t1, t0, REG(B7_4));
917 t2 = tcg_temp_new();
918 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
919 tcg_gen_and_i32(t1, t1, t2);
920 tcg_temp_free(t2);
921 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
922 tcg_temp_free(t1);
923 tcg_gen_mov_i32(REG(B11_8), t0);
924 tcg_temp_free(t0);
926 return;
927 case 0x2008: /* tst Rm,Rn */
929 TCGv val = tcg_temp_new();
930 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
931 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
932 tcg_temp_free(val);
934 return;
935 case 0x200a: /* xor Rm,Rn */
936 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
937 return;
938 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
939 CHECK_FPU_ENABLED
940 if (ctx->flags & FPSCR_SZ) {
941 TCGv_i64 fp = tcg_temp_new_i64();
942 gen_load_fpr64(fp, XREG(B7_4));
943 gen_store_fpr64(fp, XREG(B11_8));
944 tcg_temp_free_i64(fp);
945 } else {
946 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
948 return;
949 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
950 CHECK_FPU_ENABLED
951 if (ctx->flags & FPSCR_SZ) {
952 TCGv addr_hi = tcg_temp_new();
953 int fr = XREG(B7_4);
954 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
955 tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
956 ctx->memidx, MO_TEUL);
957 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
958 ctx->memidx, MO_TEUL);
959 tcg_temp_free(addr_hi);
960 } else {
961 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
962 ctx->memidx, MO_TEUL);
964 return;
965 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
966 CHECK_FPU_ENABLED
967 if (ctx->flags & FPSCR_SZ) {
968 TCGv addr_hi = tcg_temp_new();
969 int fr = XREG(B11_8);
970 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
971 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
972 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
973 tcg_temp_free(addr_hi);
974 } else {
975 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
976 ctx->memidx, MO_TEUL);
978 return;
979 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
980 CHECK_FPU_ENABLED
981 if (ctx->flags & FPSCR_SZ) {
982 TCGv addr_hi = tcg_temp_new();
983 int fr = XREG(B11_8);
984 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
985 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
986 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
987 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
988 tcg_temp_free(addr_hi);
989 } else {
990 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
991 ctx->memidx, MO_TEUL);
992 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
994 return;
995 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
996 CHECK_FPU_ENABLED
997 TCGv addr = tcg_temp_new_i32();
998 tcg_gen_subi_i32(addr, REG(B11_8), 4);
999 if (ctx->flags & FPSCR_SZ) {
1000 int fr = XREG(B7_4);
1001 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
1002 tcg_gen_subi_i32(addr, addr, 4);
1003 tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
1004 } else {
1005 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1006 ctx->memidx, MO_TEUL);
1008 tcg_gen_mov_i32(REG(B11_8), addr);
1009 tcg_temp_free(addr);
1010 return;
1011 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1012 CHECK_FPU_ENABLED
1014 TCGv addr = tcg_temp_new_i32();
1015 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1016 if (ctx->flags & FPSCR_SZ) {
1017 int fr = XREG(B11_8);
1018 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1019 ctx->memidx, MO_TEUL);
1020 tcg_gen_addi_i32(addr, addr, 4);
1021 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1022 ctx->memidx, MO_TEUL);
1023 } else {
1024 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
1025 ctx->memidx, MO_TEUL);
1027 tcg_temp_free(addr);
1029 return;
1030 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1031 CHECK_FPU_ENABLED
1033 TCGv addr = tcg_temp_new();
1034 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1035 if (ctx->flags & FPSCR_SZ) {
1036 int fr = XREG(B7_4);
1037 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1038 ctx->memidx, MO_TEUL);
1039 tcg_gen_addi_i32(addr, addr, 4);
1040 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1041 ctx->memidx, MO_TEUL);
1042 } else {
1043 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1044 ctx->memidx, MO_TEUL);
1046 tcg_temp_free(addr);
1048 return;
1049 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1050 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1051 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1052 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1053 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1054 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1056 CHECK_FPU_ENABLED
1057 if (ctx->flags & FPSCR_PR) {
1058 TCGv_i64 fp0, fp1;
1060 if (ctx->opcode & 0x0110)
1061 break; /* illegal instruction */
1062 fp0 = tcg_temp_new_i64();
1063 fp1 = tcg_temp_new_i64();
1064 gen_load_fpr64(fp0, DREG(B11_8));
1065 gen_load_fpr64(fp1, DREG(B7_4));
1066 switch (ctx->opcode & 0xf00f) {
1067 case 0xf000: /* fadd Rm,Rn */
1068 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1069 break;
1070 case 0xf001: /* fsub Rm,Rn */
1071 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1072 break;
1073 case 0xf002: /* fmul Rm,Rn */
1074 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1075 break;
1076 case 0xf003: /* fdiv Rm,Rn */
1077 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1078 break;
1079 case 0xf004: /* fcmp/eq Rm,Rn */
1080 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1081 return;
1082 case 0xf005: /* fcmp/gt Rm,Rn */
1083 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1084 return;
1086 gen_store_fpr64(fp0, DREG(B11_8));
1087 tcg_temp_free_i64(fp0);
1088 tcg_temp_free_i64(fp1);
1089 } else {
1090 switch (ctx->opcode & 0xf00f) {
1091 case 0xf000: /* fadd Rm,Rn */
1092 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1093 cpu_fregs[FREG(B11_8)],
1094 cpu_fregs[FREG(B7_4)]);
1095 break;
1096 case 0xf001: /* fsub Rm,Rn */
1097 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1098 cpu_fregs[FREG(B11_8)],
1099 cpu_fregs[FREG(B7_4)]);
1100 break;
1101 case 0xf002: /* fmul Rm,Rn */
1102 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1103 cpu_fregs[FREG(B11_8)],
1104 cpu_fregs[FREG(B7_4)]);
1105 break;
1106 case 0xf003: /* fdiv Rm,Rn */
1107 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1108 cpu_fregs[FREG(B11_8)],
1109 cpu_fregs[FREG(B7_4)]);
1110 break;
1111 case 0xf004: /* fcmp/eq Rm,Rn */
1112 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1113 cpu_fregs[FREG(B7_4)]);
1114 return;
1115 case 0xf005: /* fcmp/gt Rm,Rn */
1116 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1117 cpu_fregs[FREG(B7_4)]);
1118 return;
1122 return;
1123 case 0xf00e: /* fmac FR0,RM,Rn */
1125 CHECK_FPU_ENABLED
1126 if (ctx->flags & FPSCR_PR) {
1127 break; /* illegal instruction */
1128 } else {
1129 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1130 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1131 cpu_fregs[FREG(B11_8)]);
1132 return;
1137 switch (ctx->opcode & 0xff00) {
1138 case 0xc900: /* and #imm,R0 */
1139 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1140 return;
1141 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1143 TCGv addr, val;
1144 addr = tcg_temp_new();
1145 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1146 val = tcg_temp_new();
1147 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1148 tcg_gen_andi_i32(val, val, B7_0);
1149 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1150 tcg_temp_free(val);
1151 tcg_temp_free(addr);
1153 return;
1154 case 0x8b00: /* bf label */
1155 CHECK_NOT_DELAY_SLOT
1156 gen_conditional_jump(ctx, ctx->pc + 2,
1157 ctx->pc + 4 + B7_0s * 2);
1158 ctx->bstate = BS_BRANCH;
1159 return;
1160 case 0x8f00: /* bf/s label */
1161 CHECK_NOT_DELAY_SLOT
1162 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1163 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1164 return;
1165 case 0x8900: /* bt label */
1166 CHECK_NOT_DELAY_SLOT
1167 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1168 ctx->pc + 2);
1169 ctx->bstate = BS_BRANCH;
1170 return;
1171 case 0x8d00: /* bt/s label */
1172 CHECK_NOT_DELAY_SLOT
1173 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1174 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1175 return;
1176 case 0x8800: /* cmp/eq #imm,R0 */
1177 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1178 return;
1179 case 0xc400: /* mov.b @(disp,GBR),R0 */
1181 TCGv addr = tcg_temp_new();
1182 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1183 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1184 tcg_temp_free(addr);
1186 return;
1187 case 0xc500: /* mov.w @(disp,GBR),R0 */
1189 TCGv addr = tcg_temp_new();
1190 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1191 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1192 tcg_temp_free(addr);
1194 return;
1195 case 0xc600: /* mov.l @(disp,GBR),R0 */
1197 TCGv addr = tcg_temp_new();
1198 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1199 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1200 tcg_temp_free(addr);
1202 return;
1203 case 0xc000: /* mov.b R0,@(disp,GBR) */
1205 TCGv addr = tcg_temp_new();
1206 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1207 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1208 tcg_temp_free(addr);
1210 return;
1211 case 0xc100: /* mov.w R0,@(disp,GBR) */
1213 TCGv addr = tcg_temp_new();
1214 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1215 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1216 tcg_temp_free(addr);
1218 return;
1219 case 0xc200: /* mov.l R0,@(disp,GBR) */
1221 TCGv addr = tcg_temp_new();
1222 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1223 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1224 tcg_temp_free(addr);
1226 return;
1227 case 0x8000: /* mov.b R0,@(disp,Rn) */
1229 TCGv addr = tcg_temp_new();
1230 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1231 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1232 tcg_temp_free(addr);
1234 return;
1235 case 0x8100: /* mov.w R0,@(disp,Rn) */
1237 TCGv addr = tcg_temp_new();
1238 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1239 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1240 tcg_temp_free(addr);
1242 return;
1243 case 0x8400: /* mov.b @(disp,Rn),R0 */
1245 TCGv addr = tcg_temp_new();
1246 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1247 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1248 tcg_temp_free(addr);
1250 return;
1251 case 0x8500: /* mov.w @(disp,Rn),R0 */
1253 TCGv addr = tcg_temp_new();
1254 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1255 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1256 tcg_temp_free(addr);
1258 return;
1259 case 0xc700: /* mova @(disp,PC),R0 */
1260 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1261 return;
1262 case 0xcb00: /* or #imm,R0 */
1263 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1264 return;
1265 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1267 TCGv addr, val;
1268 addr = tcg_temp_new();
1269 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1270 val = tcg_temp_new();
1271 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1272 tcg_gen_ori_i32(val, val, B7_0);
1273 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1274 tcg_temp_free(val);
1275 tcg_temp_free(addr);
1277 return;
1278 case 0xc300: /* trapa #imm */
1280 TCGv imm;
1281 CHECK_NOT_DELAY_SLOT
1282 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1283 imm = tcg_const_i32(B7_0);
1284 gen_helper_trapa(cpu_env, imm);
1285 tcg_temp_free(imm);
1286 ctx->bstate = BS_BRANCH;
1288 return;
1289 case 0xc800: /* tst #imm,R0 */
1291 TCGv val = tcg_temp_new();
1292 tcg_gen_andi_i32(val, REG(0), B7_0);
1293 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1294 tcg_temp_free(val);
1296 return;
1297 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1299 TCGv val = tcg_temp_new();
1300 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1301 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1302 tcg_gen_andi_i32(val, val, B7_0);
1303 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1304 tcg_temp_free(val);
1306 return;
1307 case 0xca00: /* xor #imm,R0 */
1308 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1309 return;
1310 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1312 TCGv addr, val;
1313 addr = tcg_temp_new();
1314 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1315 val = tcg_temp_new();
1316 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1317 tcg_gen_xori_i32(val, val, B7_0);
1318 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1319 tcg_temp_free(val);
1320 tcg_temp_free(addr);
1322 return;
1325 switch (ctx->opcode & 0xf08f) {
1326 case 0x408e: /* ldc Rm,Rn_BANK */
1327 CHECK_PRIVILEGED
1328 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1329 return;
1330 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1331 CHECK_PRIVILEGED
1332 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1333 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1334 return;
1335 case 0x0082: /* stc Rm_BANK,Rn */
1336 CHECK_PRIVILEGED
1337 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1338 return;
1339 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1340 CHECK_PRIVILEGED
1342 TCGv addr = tcg_temp_new();
1343 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1344 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1345 tcg_gen_mov_i32(REG(B11_8), addr);
1346 tcg_temp_free(addr);
1348 return;
1351 switch (ctx->opcode & 0xf0ff) {
1352 case 0x0023: /* braf Rn */
1353 CHECK_NOT_DELAY_SLOT
1354 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1355 ctx->flags |= DELAY_SLOT;
1356 ctx->delayed_pc = (uint32_t) - 1;
1357 return;
1358 case 0x0003: /* bsrf Rn */
1359 CHECK_NOT_DELAY_SLOT
1360 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1361 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1362 ctx->flags |= DELAY_SLOT;
1363 ctx->delayed_pc = (uint32_t) - 1;
1364 return;
1365 case 0x4015: /* cmp/pl Rn */
1366 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1367 return;
1368 case 0x4011: /* cmp/pz Rn */
1369 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1370 return;
1371 case 0x4010: /* dt Rn */
1372 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1373 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1374 return;
1375 case 0x402b: /* jmp @Rn */
1376 CHECK_NOT_DELAY_SLOT
1377 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1378 ctx->flags |= DELAY_SLOT;
1379 ctx->delayed_pc = (uint32_t) - 1;
1380 return;
1381 case 0x400b: /* jsr @Rn */
1382 CHECK_NOT_DELAY_SLOT
1383 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1384 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1385 ctx->flags |= DELAY_SLOT;
1386 ctx->delayed_pc = (uint32_t) - 1;
1387 return;
1388 case 0x400e: /* ldc Rm,SR */
1389 CHECK_PRIVILEGED
1391 TCGv val = tcg_temp_new();
1392 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1393 gen_write_sr(val);
1394 tcg_temp_free(val);
1395 ctx->bstate = BS_STOP;
1397 return;
1398 case 0x4007: /* ldc.l @Rm+,SR */
1399 CHECK_PRIVILEGED
1401 TCGv val = tcg_temp_new();
1402 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1403 tcg_gen_andi_i32(val, val, 0x700083f3);
1404 gen_write_sr(val);
1405 tcg_temp_free(val);
1406 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1407 ctx->bstate = BS_STOP;
1409 return;
1410 case 0x0002: /* stc SR,Rn */
1411 CHECK_PRIVILEGED
1412 gen_read_sr(REG(B11_8));
1413 return;
1414 case 0x4003: /* stc SR,@-Rn */
1415 CHECK_PRIVILEGED
1417 TCGv addr = tcg_temp_new();
1418 TCGv val = tcg_temp_new();
1419 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1420 gen_read_sr(val);
1421 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1422 tcg_gen_mov_i32(REG(B11_8), addr);
1423 tcg_temp_free(val);
1424 tcg_temp_free(addr);
1426 return;
1427 #define LD(reg,ldnum,ldpnum,prechk) \
1428 case ldnum: \
1429 prechk \
1430 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1431 return; \
1432 case ldpnum: \
1433 prechk \
1434 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1435 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1436 return;
1437 #define ST(reg,stnum,stpnum,prechk) \
1438 case stnum: \
1439 prechk \
1440 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1441 return; \
1442 case stpnum: \
1443 prechk \
1445 TCGv addr = tcg_temp_new(); \
1446 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1447 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1448 tcg_gen_mov_i32(REG(B11_8), addr); \
1449 tcg_temp_free(addr); \
1451 return;
1452 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1453 LD(reg,ldnum,ldpnum,prechk) \
1454 ST(reg,stnum,stpnum,prechk)
1455 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1456 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1457 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1458 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1459 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1460 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1461 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1462 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1463 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1464 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1465 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1466 case 0x406a: /* lds Rm,FPSCR */
1467 CHECK_FPU_ENABLED
1468 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1469 ctx->bstate = BS_STOP;
1470 return;
1471 case 0x4066: /* lds.l @Rm+,FPSCR */
1472 CHECK_FPU_ENABLED
1474 TCGv addr = tcg_temp_new();
1475 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1476 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1477 gen_helper_ld_fpscr(cpu_env, addr);
1478 tcg_temp_free(addr);
1479 ctx->bstate = BS_STOP;
1481 return;
1482 case 0x006a: /* sts FPSCR,Rn */
1483 CHECK_FPU_ENABLED
1484 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1485 return;
1486 case 0x4062: /* sts FPSCR,@-Rn */
1487 CHECK_FPU_ENABLED
1489 TCGv addr, val;
1490 val = tcg_temp_new();
1491 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1492 addr = tcg_temp_new();
1493 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1494 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1495 tcg_gen_mov_i32(REG(B11_8), addr);
1496 tcg_temp_free(addr);
1497 tcg_temp_free(val);
1499 return;
1500 case 0x00c3: /* movca.l R0,@Rm */
1502 TCGv val = tcg_temp_new();
1503 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1504 gen_helper_movcal(cpu_env, REG(B11_8), val);
1505 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1507 ctx->has_movcal = 1;
1508 return;
1509 case 0x40a9:
1510 /* MOVUA.L @Rm,R0 (Rm) -> R0
1511 Load non-boundary-aligned data */
1512 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1513 return;
1514 case 0x40e9:
1515 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1516 Load non-boundary-aligned data */
1517 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1518 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1519 return;
1520 case 0x0029: /* movt Rn */
1521 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1522 return;
1523 case 0x0073:
1524 /* MOVCO.L
1525 LDST -> T
1526 If (T == 1) R0 -> (Rn)
1527 0 -> LDST
1529 if (ctx->features & SH_FEATURE_SH4A) {
1530 TCGLabel *label = gen_new_label();
1531 tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
1532 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1533 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1534 gen_set_label(label);
1535 tcg_gen_movi_i32(cpu_ldst, 0);
1536 return;
1537 } else
1538 break;
1539 case 0x0063:
1540 /* MOVLI.L @Rm,R0
1541 1 -> LDST
1542 (Rm) -> R0
1543 When interrupt/exception
1544 occurred 0 -> LDST
1546 if (ctx->features & SH_FEATURE_SH4A) {
1547 tcg_gen_movi_i32(cpu_ldst, 0);
1548 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1549 tcg_gen_movi_i32(cpu_ldst, 1);
1550 return;
1551 } else
1552 break;
1553 case 0x0093: /* ocbi @Rn */
1555 gen_helper_ocbi(cpu_env, REG(B11_8));
1557 return;
1558 case 0x00a3: /* ocbp @Rn */
1559 case 0x00b3: /* ocbwb @Rn */
1560 /* These instructions are supposed to do nothing in case of
1561 a cache miss. Given that we only partially emulate caches
1562 it is safe to simply ignore them. */
1563 return;
1564 case 0x0083: /* pref @Rn */
1565 return;
1566 case 0x00d3: /* prefi @Rn */
1567 if (ctx->features & SH_FEATURE_SH4A)
1568 return;
1569 else
1570 break;
1571 case 0x00e3: /* icbi @Rn */
1572 if (ctx->features & SH_FEATURE_SH4A)
1573 return;
1574 else
1575 break;
1576 case 0x00ab: /* synco */
1577 if (ctx->features & SH_FEATURE_SH4A)
1578 return;
1579 else
1580 break;
1581 case 0x4024: /* rotcl Rn */
1583 TCGv tmp = tcg_temp_new();
1584 tcg_gen_mov_i32(tmp, cpu_sr_t);
1585 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1586 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1587 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1588 tcg_temp_free(tmp);
1590 return;
1591 case 0x4025: /* rotcr Rn */
1593 TCGv tmp = tcg_temp_new();
1594 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1595 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1596 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1597 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1598 tcg_temp_free(tmp);
1600 return;
1601 case 0x4004: /* rotl Rn */
1602 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1603 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1604 return;
1605 case 0x4005: /* rotr Rn */
1606 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1607 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1608 return;
1609 case 0x4000: /* shll Rn */
1610 case 0x4020: /* shal Rn */
1611 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1612 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1613 return;
1614 case 0x4021: /* shar Rn */
1615 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1616 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1617 return;
1618 case 0x4001: /* shlr Rn */
1619 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1620 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1621 return;
1622 case 0x4008: /* shll2 Rn */
1623 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1624 return;
1625 case 0x4018: /* shll8 Rn */
1626 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1627 return;
1628 case 0x4028: /* shll16 Rn */
1629 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1630 return;
1631 case 0x4009: /* shlr2 Rn */
1632 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1633 return;
1634 case 0x4019: /* shlr8 Rn */
1635 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1636 return;
1637 case 0x4029: /* shlr16 Rn */
1638 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1639 return;
1640 case 0x401b: /* tas.b @Rn */
1642 TCGv addr, val;
1643 addr = tcg_temp_local_new();
1644 tcg_gen_mov_i32(addr, REG(B11_8));
1645 val = tcg_temp_local_new();
1646 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1647 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1648 tcg_gen_ori_i32(val, val, 0x80);
1649 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1650 tcg_temp_free(val);
1651 tcg_temp_free(addr);
1653 return;
1654 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1655 CHECK_FPU_ENABLED
1656 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1657 return;
1658 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1659 CHECK_FPU_ENABLED
1660 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1661 return;
1662 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1663 CHECK_FPU_ENABLED
1664 if (ctx->flags & FPSCR_PR) {
1665 TCGv_i64 fp;
1666 if (ctx->opcode & 0x0100)
1667 break; /* illegal instruction */
1668 fp = tcg_temp_new_i64();
1669 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1670 gen_store_fpr64(fp, DREG(B11_8));
1671 tcg_temp_free_i64(fp);
1673 else {
1674 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1676 return;
1677 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1678 CHECK_FPU_ENABLED
1679 if (ctx->flags & FPSCR_PR) {
1680 TCGv_i64 fp;
1681 if (ctx->opcode & 0x0100)
1682 break; /* illegal instruction */
1683 fp = tcg_temp_new_i64();
1684 gen_load_fpr64(fp, DREG(B11_8));
1685 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1686 tcg_temp_free_i64(fp);
1688 else {
1689 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1691 return;
1692 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1693 CHECK_FPU_ENABLED
1695 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1697 return;
1698 case 0xf05d: /* fabs FRn/DRn */
1699 CHECK_FPU_ENABLED
1700 if (ctx->flags & FPSCR_PR) {
1701 if (ctx->opcode & 0x0100)
1702 break; /* illegal instruction */
1703 TCGv_i64 fp = tcg_temp_new_i64();
1704 gen_load_fpr64(fp, DREG(B11_8));
1705 gen_helper_fabs_DT(fp, fp);
1706 gen_store_fpr64(fp, DREG(B11_8));
1707 tcg_temp_free_i64(fp);
1708 } else {
1709 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1711 return;
1712 case 0xf06d: /* fsqrt FRn */
1713 CHECK_FPU_ENABLED
1714 if (ctx->flags & FPSCR_PR) {
1715 if (ctx->opcode & 0x0100)
1716 break; /* illegal instruction */
1717 TCGv_i64 fp = tcg_temp_new_i64();
1718 gen_load_fpr64(fp, DREG(B11_8));
1719 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1720 gen_store_fpr64(fp, DREG(B11_8));
1721 tcg_temp_free_i64(fp);
1722 } else {
1723 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1724 cpu_fregs[FREG(B11_8)]);
1726 return;
1727 case 0xf07d: /* fsrra FRn */
1728 CHECK_FPU_ENABLED
1729 break;
1730 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1731 CHECK_FPU_ENABLED
1732 if (!(ctx->flags & FPSCR_PR)) {
1733 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1735 return;
1736 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1737 CHECK_FPU_ENABLED
1738 if (!(ctx->flags & FPSCR_PR)) {
1739 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1741 return;
1742 case 0xf0ad: /* fcnvsd FPUL,DRn */
1743 CHECK_FPU_ENABLED
1745 TCGv_i64 fp = tcg_temp_new_i64();
1746 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1747 gen_store_fpr64(fp, DREG(B11_8));
1748 tcg_temp_free_i64(fp);
1750 return;
1751 case 0xf0bd: /* fcnvds DRn,FPUL */
1752 CHECK_FPU_ENABLED
1754 TCGv_i64 fp = tcg_temp_new_i64();
1755 gen_load_fpr64(fp, DREG(B11_8));
1756 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1757 tcg_temp_free_i64(fp);
1759 return;
1760 case 0xf0ed: /* fipr FVm,FVn */
1761 CHECK_FPU_ENABLED
1762 if ((ctx->flags & FPSCR_PR) == 0) {
1763 TCGv m, n;
1764 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1765 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1766 gen_helper_fipr(cpu_env, m, n);
1767 tcg_temp_free(m);
1768 tcg_temp_free(n);
1769 return;
1771 break;
1772 case 0xf0fd: /* ftrv XMTRX,FVn */
1773 CHECK_FPU_ENABLED
1774 if ((ctx->opcode & 0x0300) == 0x0100 &&
1775 (ctx->flags & FPSCR_PR) == 0) {
1776 TCGv n;
1777 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1778 gen_helper_ftrv(cpu_env, n);
1779 tcg_temp_free(n);
1780 return;
1782 break;
1784 #if 0
1785 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1786 ctx->opcode, ctx->pc);
1787 fflush(stderr);
1788 #endif
1789 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1790 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1791 gen_helper_raise_slot_illegal_instruction(cpu_env);
1792 } else {
1793 gen_helper_raise_illegal_instruction(cpu_env);
1795 ctx->bstate = BS_BRANCH;
/*
 * Translate a single instruction and maintain delay-slot state.
 *
 * Snapshots ctx->flags before dispatching to _decode_opc(), then compares:
 * if the *previous* instruction requested a delay slot (DELAY_SLOT or
 * DELAY_SLOT_CONDITIONAL set in old_flags), the instruction just translated
 * was that delay-slot instruction, so the pending (conditional) branch is
 * emitted now and translation of this TB stops (bstate = BS_BRANCH).
 * If the instruction just translated itself set a delay-slot flag, the
 * updated flags are spilled so the state survives into the next insn.
 */
static void decode_opc(DisasContext * ctx)
{
    uint32_t old_flags = ctx->flags;

    _decode_opc(ctx);

    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            /* Branch target is fixed: no flags need to survive the jump. */
            gen_store_flags(0);
        } else {
            /* go out of the delay slot: clear only the delay-slot bits,
               preserving the remaining translation flags */
            uint32_t new_flags = ctx->flags;
            new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
            gen_store_flags(new_flags);
        }
        ctx->flags = 0;
        /* The delayed branch is taken here, so this TB ends. */
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);
        }

    }

    /* go into a delay slot: persist the flags set by the branch insn so the
       next translated instruction knows it sits in a delay slot */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        gen_store_flags(ctx->flags);
}
/*
 * Translate a block of SH4 guest code starting at tb->pc into TCG ops.
 *
 * Translation continues until a branch/exception ends the block
 * (ctx.bstate != BS_NONE), the op buffer fills, a page boundary is crossed,
 * an insn-count or single-step limit is hit, or a breakpoint is found.
 * On exit, tb->size and tb->icount describe the translated region.
 */
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
    SuperHCPU *cpu = sh_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    /* memidx 0 is privileged, 1 is user: selected by SR.MD in the flags. */
    ctx.memidx = (ctx.flags & (1u << SR_MD)) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
        /* Record pc/flags so restore_state_to_opc() can rebuild CPU state. */
        tcg_gen_insn_start(ctx.pc, ctx.flags);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            /* We have hit a breakpoint - make sure PC is up-to-date */
            tcg_gen_movi_i32(cpu_pc, ctx.pc);
            gen_helper_debug(cpu_env);
            ctx.bstate = BS_BRANCH;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            ctx.pc += 2;
            break;
        }

        /* Icount: the last instruction of an I/O-ending TB runs with
           the io flag set so device accesses are accounted correctly. */
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        /* SH4 instructions are fixed 16-bit. */
        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        ctx.pc += 2;
        /* Stop at a page boundary so invalidation stays per-page. */
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (cs->singlestep_enabled) {
            break;
        }
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (cs->singlestep_enabled) {
        /* Single-stepping: trap back to the debugger after each TB. */
        tcg_gen_movi_i32(cpu_pc, ctx.pc);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
            /* gen_op_interrupt_restart(); */
            /* fall through */
        case BS_NONE:
            /* TB ended without a branch: spill any pending flags and
               chain to the TB at the fall-through pc. */
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            }
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* gen_op_interrupt_restart(); */
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            /* Branch cases have already emitted their own TB exit. */
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN:\n");	/* , lookup_symbol(pc_start)); */
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
/*
 * Rebuild CPU state after a TB is interrupted mid-execution.
 *
 * data[] holds the values recorded by tcg_gen_insn_start() in
 * gen_intermediate_code(): data[0] is the guest pc, data[1] the
 * translation flags of the faulting instruction.
 */
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->flags = data[1];