rcu: actually register threads that have RCU read-side critical sections
[qemu/ar7.git] / target-sh4 / translate.c
blob3b4a1b5cea857e50abe3a0f0ac19e78280c5215b
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #define DEBUG_DISAS
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "tcg-op.h"
25 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
30 #include "trace-tcg.h"
/* Per-TB decoder state for the SH4 front end. */
typedef struct DisasContext {
    struct TranslationBlock *tb;  /* TB being translated (used for chaining) */
    target_ulong pc;              /* virtual PC of the insn being decoded */
    uint16_t opcode;              /* raw 16-bit SH4 opcode */
    uint32_t flags;               /* cached SR/delay-slot bits; tested by
                                     IS_USER, REG/ALTREG and the CHECK_*
                                     macros below */
    int bstate;                   /* BS_* value: why/how translation stops */
    int memidx;                   /* MMU index for qemu_ld/st ops */
    uint32_t delayed_pc;          /* static branch target, or (uint32_t)-1
                                     when only known at run time */
    int singlestep_enabled;       /* nonzero: end every TB with a debug trap */
    uint32_t features;            /* CPU feature bits -- used outside this
                                     chunk; see SH_FEATURE checks */
    int has_movcal;               /* movca.l backup may be live; see the
                                     comment at the top of _decode_opc */
} DisasContext;
/* Privilege test: user-only builds always run unprivileged; otherwise
   privilege is the MD bit of SR as cached in ctx->flags. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
#endif
/* Values for DisasContext::bstate -- how translation of a TB ends.
   (This copy had lost the comment terminator and the closing brace;
   reconstructed.) */
enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                  * exception condition */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_gregs[24];  /* R0..R7 bank0, R8..R15, R0..R7 bank1 (see
                               gregnames in sh4_translate_init) */
/* SR is kept split: cpu_sr holds the bulk, with the Q, M and T bits
   tracked separately (see gen_read_sr/gen_write_sr). */
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];  /* FPR0..15 bank0 then bank1 */

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;

static uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "exec/gen-icount.h"
77 void sh4_translate_init(void)
79 int i;
80 static int done_init = 0;
81 static const char * const gregnames[24] = {
82 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
83 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
84 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
85 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
86 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
88 static const char * const fregnames[32] = {
89 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
90 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
91 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
92 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
93 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
94 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
95 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
96 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
99 if (done_init)
100 return;
102 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
104 for (i = 0; i < 24; i++)
105 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
106 offsetof(CPUSH4State, gregs[i]),
107 gregnames[i]);
109 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
110 offsetof(CPUSH4State, pc), "PC");
111 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
112 offsetof(CPUSH4State, sr), "SR");
113 cpu_sr_m = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUSH4State, sr_m), "SR_M");
115 cpu_sr_q = tcg_global_mem_new_i32(TCG_AREG0,
116 offsetof(CPUSH4State, sr_q), "SR_Q");
117 cpu_sr_t = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUSH4State, sr_t), "SR_T");
119 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUSH4State, ssr), "SSR");
121 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUSH4State, spc), "SPC");
123 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUSH4State, gbr), "GBR");
125 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUSH4State, vbr), "VBR");
127 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUSH4State, sgr), "SGR");
129 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
130 offsetof(CPUSH4State, dbr), "DBR");
131 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
132 offsetof(CPUSH4State, mach), "MACH");
133 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
134 offsetof(CPUSH4State, macl), "MACL");
135 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
136 offsetof(CPUSH4State, pr), "PR");
137 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
138 offsetof(CPUSH4State, fpscr), "FPSCR");
139 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
140 offsetof(CPUSH4State, fpul), "FPUL");
142 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
143 offsetof(CPUSH4State, flags), "_flags_");
144 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
145 offsetof(CPUSH4State, delayed_pc),
146 "_delayed_pc_");
147 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
148 offsetof(CPUSH4State, ldst), "_ldst_");
150 for (i = 0; i < 32; i++)
151 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
152 offsetof(CPUSH4State, fregs[i]),
153 fregnames[i]);
155 done_init = 1;
158 void superh_cpu_dump_state(CPUState *cs, FILE *f,
159 fprintf_function cpu_fprintf, int flags)
161 SuperHCPU *cpu = SUPERH_CPU(cs);
162 CPUSH4State *env = &cpu->env;
163 int i;
164 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
165 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
166 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
167 env->spc, env->ssr, env->gbr, env->vbr);
168 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
169 env->sgr, env->dbr, env->delayed_pc, env->fpul);
170 for (i = 0; i < 24; i += 4) {
171 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
172 i, env->gregs[i], i + 1, env->gregs[i + 1],
173 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
175 if (env->flags & DELAY_SLOT) {
176 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
177 env->delayed_pc);
178 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
179 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
180 env->delayed_pc);
184 static void gen_read_sr(TCGv dst)
186 TCGv t0 = tcg_temp_new();
187 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
188 tcg_gen_or_i32(dst, dst, t0);
189 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
190 tcg_gen_or_i32(dst, dst, t0);
191 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
192 tcg_gen_or_i32(dst, cpu_sr, t0);
193 tcg_temp_free_i32(t0);
196 static void gen_write_sr(TCGv src)
198 tcg_gen_andi_i32(cpu_sr, src,
199 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
200 tcg_gen_shri_i32(cpu_sr_q, src, SR_Q);
201 tcg_gen_andi_i32(cpu_sr_q, cpu_sr_q, 1);
202 tcg_gen_shri_i32(cpu_sr_m, src, SR_M);
203 tcg_gen_andi_i32(cpu_sr_m, cpu_sr_m, 1);
204 tcg_gen_shri_i32(cpu_sr_t, src, SR_T);
205 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
208 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
210 TranslationBlock *tb;
211 tb = ctx->tb;
213 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
214 !ctx->singlestep_enabled) {
215 /* Use a direct jump if in same page and singlestep not enabled */
216 tcg_gen_goto_tb(n);
217 tcg_gen_movi_i32(cpu_pc, dest);
218 tcg_gen_exit_tb((uintptr_t)tb + n);
219 } else {
220 tcg_gen_movi_i32(cpu_pc, dest);
221 if (ctx->singlestep_enabled)
222 gen_helper_debug(cpu_env);
223 tcg_gen_exit_tb(0);
227 static void gen_jump(DisasContext * ctx)
229 if (ctx->delayed_pc == (uint32_t) - 1) {
230 /* Target is not statically known, it comes necessarily from a
231 delayed jump as immediate jump are conditinal jumps */
232 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
233 if (ctx->singlestep_enabled)
234 gen_helper_debug(cpu_env);
235 tcg_gen_exit_tb(0);
236 } else {
237 gen_goto_tb(ctx, 0, ctx->delayed_pc);
241 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
243 TCGLabel *label = gen_new_label();
244 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
245 tcg_gen_brcondi_i32(t ? TCG_COND_EQ : TCG_COND_NE, cpu_sr_t, 0, label);
246 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
247 gen_set_label(label);
250 /* Immediate conditional jump (bt or bf) */
251 static void gen_conditional_jump(DisasContext * ctx,
252 target_ulong ift, target_ulong ifnott)
254 TCGLabel *l1 = gen_new_label();
255 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1);
256 gen_goto_tb(ctx, 0, ifnott);
257 gen_set_label(l1);
258 gen_goto_tb(ctx, 1, ift);
261 /* Delayed conditional jump (bt or bf) */
262 static void gen_delayed_conditional_jump(DisasContext * ctx)
264 TCGLabel *l1;
265 TCGv ds;
267 l1 = gen_new_label();
268 ds = tcg_temp_new();
269 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
270 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
271 gen_goto_tb(ctx, 1, ctx->pc + 2);
272 gen_set_label(l1);
273 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
274 gen_jump(ctx);
277 static inline void gen_store_flags(uint32_t flags)
279 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
280 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
283 static inline void gen_load_fpr64(TCGv_i64 t, int reg)
285 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
288 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
290 TCGv_i32 tmp = tcg_temp_new_i32();
291 tcg_gen_trunc_i64_i32(tmp, t);
292 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
293 tcg_gen_shri_i64(t, t, 32);
294 tcg_gen_trunc_i64_i32(tmp, t);
295 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
296 tcg_temp_free_i32(tmp);
/* Opcode field extraction: Bh_l picks bits h..l of ctx->opcode; an 's'
   suffix means the field is sign-extended. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* REG(x): the general register currently visible as Rx -- R0..R7 come
   from bank 1 when both SR.MD and SR.RB are set, else from bank 0. */
#define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
                && (ctx->flags & (1u << SR_RB))\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* ALTREG(x): the *other* bank's R0..R7 (the bank REG would not pick). */
#define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
                   || !(ctx->flags & (1u << SR_RB)))\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection honouring the FPSCR.FR bank-swap bit; XHACK
   remaps the XD pair numbering, DREG is valid for even x only. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Decode-time guard macros.  Each raises the appropriate exception,
   marks the TB as ending in BS_BRANCH and returns from _decode_opc.
   (This copy had lost the brace/continuation lines of the macro
   bodies; reconstructed.) */

/* Reject instructions that are illegal inside a delay slot. */
#define CHECK_NOT_DELAY_SLOT \
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        gen_helper_raise_slot_illegal_instruction(cpu_env);       \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }

/* Privileged instruction executed in user mode: illegal instruction
   (or its slot variant when inside a delay slot). */
#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                                           \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_illegal_instruction(cpu_env);   \
        } else {                                                  \
            gen_helper_raise_illegal_instruction(cpu_env);        \
        }                                                         \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }

/* FP instruction while SR.FD (FPU disable) is set: FPU-disable trap
   (or its slot variant when inside a delay slot). */
#define CHECK_FPU_ENABLED \
    if (ctx->flags & (1u << SR_FD)) {                             \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_fpu_disable(cpu_env);           \
        } else {                                                  \
            gen_helper_raise_fpu_disable(cpu_env);                \
        }                                                         \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }
355 static void _decode_opc(DisasContext * ctx)
357 /* This code tries to make movcal emulation sufficiently
358 accurate for Linux purposes. This instruction writes
359 memory, and prior to that, always allocates a cache line.
360 It is used in two contexts:
361 - in memcpy, where data is copied in blocks, the first write
362 of to a block uses movca.l for performance.
363 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
364 to flush the cache. Here, the data written by movcal.l is never
365 written to memory, and the data written is just bogus.
367 To simulate this, we simulate movcal.l, we store the value to memory,
368 but we also remember the previous content. If we see ocbi, we check
369 if movcal.l for that address was done previously. If so, the write should
370 not have hit the memory, so we restore the previous content.
371 When we see an instruction that is neither movca.l
372 nor ocbi, the previous content is discarded.
374 To optimize, we only try to flush stores when we're at the start of
375 TB, or if we already saw movca.l in this TB and did not flush stores
376 yet. */
377 if (ctx->has_movcal)
379 int opcode = ctx->opcode & 0xf0ff;
380 if (opcode != 0x0093 /* ocbi */
381 && opcode != 0x00c3 /* movca.l */)
383 gen_helper_discard_movcal_backup(cpu_env);
384 ctx->has_movcal = 0;
388 #if 0
389 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
390 #endif
392 switch (ctx->opcode) {
393 case 0x0019: /* div0u */
394 tcg_gen_movi_i32(cpu_sr_m, 0);
395 tcg_gen_movi_i32(cpu_sr_q, 0);
396 tcg_gen_movi_i32(cpu_sr_t, 0);
397 return;
398 case 0x000b: /* rts */
399 CHECK_NOT_DELAY_SLOT
400 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
401 ctx->flags |= DELAY_SLOT;
402 ctx->delayed_pc = (uint32_t) - 1;
403 return;
404 case 0x0028: /* clrmac */
405 tcg_gen_movi_i32(cpu_mach, 0);
406 tcg_gen_movi_i32(cpu_macl, 0);
407 return;
408 case 0x0048: /* clrs */
409 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
410 return;
411 case 0x0008: /* clrt */
412 tcg_gen_movi_i32(cpu_sr_t, 0);
413 return;
414 case 0x0038: /* ldtlb */
415 CHECK_PRIVILEGED
416 gen_helper_ldtlb(cpu_env);
417 return;
418 case 0x002b: /* rte */
419 CHECK_PRIVILEGED
420 CHECK_NOT_DELAY_SLOT
421 gen_write_sr(cpu_ssr);
422 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
423 ctx->flags |= DELAY_SLOT;
424 ctx->delayed_pc = (uint32_t) - 1;
425 return;
426 case 0x0058: /* sets */
427 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
428 return;
429 case 0x0018: /* sett */
430 tcg_gen_movi_i32(cpu_sr_t, 1);
431 return;
432 case 0xfbfd: /* frchg */
433 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
434 ctx->bstate = BS_STOP;
435 return;
436 case 0xf3fd: /* fschg */
437 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
438 ctx->bstate = BS_STOP;
439 return;
440 case 0x0009: /* nop */
441 return;
442 case 0x001b: /* sleep */
443 CHECK_PRIVILEGED
444 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
445 gen_helper_sleep(cpu_env);
446 return;
449 switch (ctx->opcode & 0xf000) {
450 case 0x1000: /* mov.l Rm,@(disp,Rn) */
452 TCGv addr = tcg_temp_new();
453 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
454 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
455 tcg_temp_free(addr);
457 return;
458 case 0x5000: /* mov.l @(disp,Rm),Rn */
460 TCGv addr = tcg_temp_new();
461 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
462 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
463 tcg_temp_free(addr);
465 return;
466 case 0xe000: /* mov #imm,Rn */
467 tcg_gen_movi_i32(REG(B11_8), B7_0s);
468 return;
469 case 0x9000: /* mov.w @(disp,PC),Rn */
471 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
472 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
473 tcg_temp_free(addr);
475 return;
476 case 0xd000: /* mov.l @(disp,PC),Rn */
478 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
479 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
480 tcg_temp_free(addr);
482 return;
483 case 0x7000: /* add #imm,Rn */
484 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
485 return;
486 case 0xa000: /* bra disp */
487 CHECK_NOT_DELAY_SLOT
488 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
489 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
490 ctx->flags |= DELAY_SLOT;
491 return;
492 case 0xb000: /* bsr disp */
493 CHECK_NOT_DELAY_SLOT
494 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
495 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
496 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
497 ctx->flags |= DELAY_SLOT;
498 return;
501 switch (ctx->opcode & 0xf00f) {
502 case 0x6003: /* mov Rm,Rn */
503 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
504 return;
505 case 0x2000: /* mov.b Rm,@Rn */
506 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
507 return;
508 case 0x2001: /* mov.w Rm,@Rn */
509 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
510 return;
511 case 0x2002: /* mov.l Rm,@Rn */
512 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
513 return;
514 case 0x6000: /* mov.b @Rm,Rn */
515 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
516 return;
517 case 0x6001: /* mov.w @Rm,Rn */
518 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
519 return;
520 case 0x6002: /* mov.l @Rm,Rn */
521 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
522 return;
523 case 0x2004: /* mov.b Rm,@-Rn */
525 TCGv addr = tcg_temp_new();
526 tcg_gen_subi_i32(addr, REG(B11_8), 1);
527 /* might cause re-execution */
528 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
529 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
530 tcg_temp_free(addr);
532 return;
533 case 0x2005: /* mov.w Rm,@-Rn */
535 TCGv addr = tcg_temp_new();
536 tcg_gen_subi_i32(addr, REG(B11_8), 2);
537 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
538 tcg_gen_mov_i32(REG(B11_8), addr);
539 tcg_temp_free(addr);
541 return;
542 case 0x2006: /* mov.l Rm,@-Rn */
544 TCGv addr = tcg_temp_new();
545 tcg_gen_subi_i32(addr, REG(B11_8), 4);
546 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
547 tcg_gen_mov_i32(REG(B11_8), addr);
549 return;
550 case 0x6004: /* mov.b @Rm+,Rn */
551 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
552 if ( B11_8 != B7_4 )
553 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
554 return;
555 case 0x6005: /* mov.w @Rm+,Rn */
556 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
557 if ( B11_8 != B7_4 )
558 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
559 return;
560 case 0x6006: /* mov.l @Rm+,Rn */
561 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
562 if ( B11_8 != B7_4 )
563 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
564 return;
565 case 0x0004: /* mov.b Rm,@(R0,Rn) */
567 TCGv addr = tcg_temp_new();
568 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
569 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
570 tcg_temp_free(addr);
572 return;
573 case 0x0005: /* mov.w Rm,@(R0,Rn) */
575 TCGv addr = tcg_temp_new();
576 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
577 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
578 tcg_temp_free(addr);
580 return;
581 case 0x0006: /* mov.l Rm,@(R0,Rn) */
583 TCGv addr = tcg_temp_new();
584 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
585 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
586 tcg_temp_free(addr);
588 return;
589 case 0x000c: /* mov.b @(R0,Rm),Rn */
591 TCGv addr = tcg_temp_new();
592 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
593 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
594 tcg_temp_free(addr);
596 return;
597 case 0x000d: /* mov.w @(R0,Rm),Rn */
599 TCGv addr = tcg_temp_new();
600 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
601 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
602 tcg_temp_free(addr);
604 return;
605 case 0x000e: /* mov.l @(R0,Rm),Rn */
607 TCGv addr = tcg_temp_new();
608 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
609 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
610 tcg_temp_free(addr);
612 return;
613 case 0x6008: /* swap.b Rm,Rn */
615 TCGv high, low;
616 high = tcg_temp_new();
617 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
618 low = tcg_temp_new();
619 tcg_gen_ext16u_i32(low, REG(B7_4));
620 tcg_gen_bswap16_i32(low, low);
621 tcg_gen_or_i32(REG(B11_8), high, low);
622 tcg_temp_free(low);
623 tcg_temp_free(high);
625 return;
626 case 0x6009: /* swap.w Rm,Rn */
627 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
628 return;
629 case 0x200d: /* xtrct Rm,Rn */
631 TCGv high, low;
632 high = tcg_temp_new();
633 tcg_gen_shli_i32(high, REG(B7_4), 16);
634 low = tcg_temp_new();
635 tcg_gen_shri_i32(low, REG(B11_8), 16);
636 tcg_gen_or_i32(REG(B11_8), high, low);
637 tcg_temp_free(low);
638 tcg_temp_free(high);
640 return;
641 case 0x300c: /* add Rm,Rn */
642 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
643 return;
644 case 0x300e: /* addc Rm,Rn */
646 TCGv t0, t1;
647 t0 = tcg_const_tl(0);
648 t1 = tcg_temp_new();
649 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
650 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
651 REG(B11_8), t0, t1, cpu_sr_t);
652 tcg_temp_free(t0);
653 tcg_temp_free(t1);
655 return;
656 case 0x300f: /* addv Rm,Rn */
658 TCGv t0, t1, t2;
659 t0 = tcg_temp_new();
660 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
661 t1 = tcg_temp_new();
662 tcg_gen_xor_i32(t1, t0, REG(B11_8));
663 t2 = tcg_temp_new();
664 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
665 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
666 tcg_temp_free(t2);
667 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
668 tcg_temp_free(t1);
669 tcg_gen_mov_i32(REG(B7_4), t0);
670 tcg_temp_free(t0);
672 return;
673 case 0x2009: /* and Rm,Rn */
674 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
675 return;
676 case 0x3000: /* cmp/eq Rm,Rn */
677 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
678 return;
679 case 0x3003: /* cmp/ge Rm,Rn */
680 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
681 return;
682 case 0x3007: /* cmp/gt Rm,Rn */
683 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
684 return;
685 case 0x3006: /* cmp/hi Rm,Rn */
686 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
687 return;
688 case 0x3002: /* cmp/hs Rm,Rn */
689 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
690 return;
691 case 0x200c: /* cmp/str Rm,Rn */
693 TCGv cmp1 = tcg_temp_new();
694 TCGv cmp2 = tcg_temp_new();
695 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
696 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
697 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cmp2, 0);
698 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
699 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
700 tcg_gen_or_i32(cpu_sr_t, cpu_sr_t, cmp2);
701 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
702 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
703 tcg_gen_or_i32(cpu_sr_t, cpu_sr_t, cmp2);
704 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
705 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
706 tcg_gen_or_i32(cpu_sr_t, cpu_sr_t, cmp2);
707 tcg_temp_free(cmp2);
708 tcg_temp_free(cmp1);
710 return;
711 case 0x2007: /* div0s Rm,Rn */
712 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
713 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
714 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
715 return;
716 case 0x3004: /* div1 Rm,Rn */
718 TCGv t0 = tcg_temp_new();
719 TCGv t1 = tcg_temp_new();
720 TCGv t2 = tcg_temp_new();
721 TCGv zero = tcg_const_i32(0);
723 /* shift left arg1, saving the bit being pushed out and inserting
724 T on the right */
725 tcg_gen_shri_i32(t0, REG(B11_8), 31);
726 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
727 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
729 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
730 using 64-bit temps, we compute arg0's high part from q ^ m, so
731 that it is 0x00000000 when adding the value or 0xffffffff when
732 subtracting it. */
733 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
734 tcg_gen_subi_i32(t1, t1, 1);
735 tcg_gen_neg_i32(t2, REG(B7_4));
736 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
737 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
739 /* compute T and Q depending on carry */
740 tcg_gen_andi_i32(t1, t1, 1);
741 tcg_gen_xor_i32(t1, t1, t0);
742 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
743 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
745 tcg_temp_free(zero);
746 tcg_temp_free(t2);
747 tcg_temp_free(t1);
748 tcg_temp_free(t0);
750 return;
751 case 0x300d: /* dmuls.l Rm,Rn */
752 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
753 return;
754 case 0x3005: /* dmulu.l Rm,Rn */
755 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
756 return;
757 case 0x600e: /* exts.b Rm,Rn */
758 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
759 return;
760 case 0x600f: /* exts.w Rm,Rn */
761 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
762 return;
763 case 0x600c: /* extu.b Rm,Rn */
764 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
765 return;
766 case 0x600d: /* extu.w Rm,Rn */
767 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
768 return;
769 case 0x000f: /* mac.l @Rm+,@Rn+ */
771 TCGv arg0, arg1;
772 arg0 = tcg_temp_new();
773 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
774 arg1 = tcg_temp_new();
775 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
776 gen_helper_macl(cpu_env, arg0, arg1);
777 tcg_temp_free(arg1);
778 tcg_temp_free(arg0);
779 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
780 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
782 return;
783 case 0x400f: /* mac.w @Rm+,@Rn+ */
785 TCGv arg0, arg1;
786 arg0 = tcg_temp_new();
787 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
788 arg1 = tcg_temp_new();
789 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
790 gen_helper_macw(cpu_env, arg0, arg1);
791 tcg_temp_free(arg1);
792 tcg_temp_free(arg0);
793 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
794 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
796 return;
797 case 0x0007: /* mul.l Rm,Rn */
798 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
799 return;
800 case 0x200f: /* muls.w Rm,Rn */
802 TCGv arg0, arg1;
803 arg0 = tcg_temp_new();
804 tcg_gen_ext16s_i32(arg0, REG(B7_4));
805 arg1 = tcg_temp_new();
806 tcg_gen_ext16s_i32(arg1, REG(B11_8));
807 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
808 tcg_temp_free(arg1);
809 tcg_temp_free(arg0);
811 return;
812 case 0x200e: /* mulu.w Rm,Rn */
814 TCGv arg0, arg1;
815 arg0 = tcg_temp_new();
816 tcg_gen_ext16u_i32(arg0, REG(B7_4));
817 arg1 = tcg_temp_new();
818 tcg_gen_ext16u_i32(arg1, REG(B11_8));
819 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
820 tcg_temp_free(arg1);
821 tcg_temp_free(arg0);
823 return;
824 case 0x600b: /* neg Rm,Rn */
825 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
826 return;
827 case 0x600a: /* negc Rm,Rn */
829 TCGv t0 = tcg_const_i32(0);
830 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
831 REG(B7_4), t0, cpu_sr_t, t0);
832 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
833 t0, t0, REG(B11_8), cpu_sr_t);
834 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
835 tcg_temp_free(t0);
837 return;
838 case 0x6007: /* not Rm,Rn */
839 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
840 return;
841 case 0x200b: /* or Rm,Rn */
842 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
843 return;
844 case 0x400c: /* shad Rm,Rn */
846 TCGLabel *label1 = gen_new_label();
847 TCGLabel *label2 = gen_new_label();
848 TCGLabel *label3 = gen_new_label();
849 TCGLabel *label4 = gen_new_label();
850 TCGv shift;
851 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
852 /* Rm positive, shift to the left */
853 shift = tcg_temp_new();
854 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
855 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
856 tcg_temp_free(shift);
857 tcg_gen_br(label4);
858 /* Rm negative, shift to the right */
859 gen_set_label(label1);
860 shift = tcg_temp_new();
861 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
862 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
863 tcg_gen_not_i32(shift, REG(B7_4));
864 tcg_gen_andi_i32(shift, shift, 0x1f);
865 tcg_gen_addi_i32(shift, shift, 1);
866 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
867 tcg_temp_free(shift);
868 tcg_gen_br(label4);
869 /* Rm = -32 */
870 gen_set_label(label2);
871 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
872 tcg_gen_movi_i32(REG(B11_8), 0);
873 tcg_gen_br(label4);
874 gen_set_label(label3);
875 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
876 gen_set_label(label4);
878 return;
879 case 0x400d: /* shld Rm,Rn */
881 TCGLabel *label1 = gen_new_label();
882 TCGLabel *label2 = gen_new_label();
883 TCGLabel *label3 = gen_new_label();
884 TCGv shift;
885 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
886 /* Rm positive, shift to the left */
887 shift = tcg_temp_new();
888 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
889 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
890 tcg_temp_free(shift);
891 tcg_gen_br(label3);
892 /* Rm negative, shift to the right */
893 gen_set_label(label1);
894 shift = tcg_temp_new();
895 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
896 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
897 tcg_gen_not_i32(shift, REG(B7_4));
898 tcg_gen_andi_i32(shift, shift, 0x1f);
899 tcg_gen_addi_i32(shift, shift, 1);
900 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
901 tcg_temp_free(shift);
902 tcg_gen_br(label3);
903 /* Rm = -32 */
904 gen_set_label(label2);
905 tcg_gen_movi_i32(REG(B11_8), 0);
906 gen_set_label(label3);
908 return;
909 case 0x3008: /* sub Rm,Rn */
910 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
911 return;
912 case 0x300a: /* subc Rm,Rn */
914 TCGv t0, t1;
915 t0 = tcg_const_tl(0);
916 t1 = tcg_temp_new();
917 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
918 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
919 REG(B11_8), t0, t1, cpu_sr_t);
920 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
921 tcg_temp_free(t0);
922 tcg_temp_free(t1);
924 return;
925 case 0x300b: /* subv Rm,Rn */
927 TCGv t0, t1, t2;
928 t0 = tcg_temp_new();
929 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
930 t1 = tcg_temp_new();
931 tcg_gen_xor_i32(t1, t0, REG(B7_4));
932 t2 = tcg_temp_new();
933 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
934 tcg_gen_and_i32(t1, t1, t2);
935 tcg_temp_free(t2);
936 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
937 tcg_temp_free(t1);
938 tcg_gen_mov_i32(REG(B11_8), t0);
939 tcg_temp_free(t0);
941 return;
942 case 0x2008: /* tst Rm,Rn */
944 TCGv val = tcg_temp_new();
945 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
946 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
947 tcg_temp_free(val);
949 return;
950 case 0x200a: /* xor Rm,Rn */
951 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
952 return;
953 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
954 CHECK_FPU_ENABLED
955 if (ctx->flags & FPSCR_SZ) {
956 TCGv_i64 fp = tcg_temp_new_i64();
957 gen_load_fpr64(fp, XREG(B7_4));
958 gen_store_fpr64(fp, XREG(B11_8));
959 tcg_temp_free_i64(fp);
960 } else {
961 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
963 return;
964 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
965 CHECK_FPU_ENABLED
966 if (ctx->flags & FPSCR_SZ) {
967 TCGv addr_hi = tcg_temp_new();
968 int fr = XREG(B7_4);
969 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
970 tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
971 ctx->memidx, MO_TEUL);
972 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
973 ctx->memidx, MO_TEUL);
974 tcg_temp_free(addr_hi);
975 } else {
976 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
977 ctx->memidx, MO_TEUL);
979 return;
980 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
981 CHECK_FPU_ENABLED
982 if (ctx->flags & FPSCR_SZ) {
983 TCGv addr_hi = tcg_temp_new();
984 int fr = XREG(B11_8);
985 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
986 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
987 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
988 tcg_temp_free(addr_hi);
989 } else {
990 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
991 ctx->memidx, MO_TEUL);
993 return;
994 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
995 CHECK_FPU_ENABLED
996 if (ctx->flags & FPSCR_SZ) {
997 TCGv addr_hi = tcg_temp_new();
998 int fr = XREG(B11_8);
999 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1000 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
1001 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
1002 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1003 tcg_temp_free(addr_hi);
1004 } else {
1005 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
1006 ctx->memidx, MO_TEUL);
1007 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1009 return;
1010 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1011 CHECK_FPU_ENABLED
1012 TCGv addr = tcg_temp_new_i32();
1013 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1014 if (ctx->flags & FPSCR_SZ) {
1015 int fr = XREG(B7_4);
1016 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
1017 tcg_gen_subi_i32(addr, addr, 4);
1018 tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
1019 } else {
1020 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1021 ctx->memidx, MO_TEUL);
1023 tcg_gen_mov_i32(REG(B11_8), addr);
1024 tcg_temp_free(addr);
1025 return;
1026 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1027 CHECK_FPU_ENABLED
1029 TCGv addr = tcg_temp_new_i32();
1030 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1031 if (ctx->flags & FPSCR_SZ) {
1032 int fr = XREG(B11_8);
1033 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1034 ctx->memidx, MO_TEUL);
1035 tcg_gen_addi_i32(addr, addr, 4);
1036 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1037 ctx->memidx, MO_TEUL);
1038 } else {
1039 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
1040 ctx->memidx, MO_TEUL);
1042 tcg_temp_free(addr);
1044 return;
1045 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1046 CHECK_FPU_ENABLED
1048 TCGv addr = tcg_temp_new();
1049 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1050 if (ctx->flags & FPSCR_SZ) {
1051 int fr = XREG(B7_4);
1052 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1053 ctx->memidx, MO_TEUL);
1054 tcg_gen_addi_i32(addr, addr, 4);
1055 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1056 ctx->memidx, MO_TEUL);
1057 } else {
1058 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1059 ctx->memidx, MO_TEUL);
1061 tcg_temp_free(addr);
1063 return;
1064 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1065 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1066 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1067 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1068 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1069 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1071 CHECK_FPU_ENABLED
1072 if (ctx->flags & FPSCR_PR) {
1073 TCGv_i64 fp0, fp1;
1075 if (ctx->opcode & 0x0110)
1076 break; /* illegal instruction */
1077 fp0 = tcg_temp_new_i64();
1078 fp1 = tcg_temp_new_i64();
1079 gen_load_fpr64(fp0, DREG(B11_8));
1080 gen_load_fpr64(fp1, DREG(B7_4));
1081 switch (ctx->opcode & 0xf00f) {
1082 case 0xf000: /* fadd Rm,Rn */
1083 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1084 break;
1085 case 0xf001: /* fsub Rm,Rn */
1086 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1087 break;
1088 case 0xf002: /* fmul Rm,Rn */
1089 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1090 break;
1091 case 0xf003: /* fdiv Rm,Rn */
1092 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1093 break;
1094 case 0xf004: /* fcmp/eq Rm,Rn */
1095 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1096 return;
1097 case 0xf005: /* fcmp/gt Rm,Rn */
1098 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1099 return;
1101 gen_store_fpr64(fp0, DREG(B11_8));
1102 tcg_temp_free_i64(fp0);
1103 tcg_temp_free_i64(fp1);
1104 } else {
1105 switch (ctx->opcode & 0xf00f) {
1106 case 0xf000: /* fadd Rm,Rn */
1107 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1108 cpu_fregs[FREG(B11_8)],
1109 cpu_fregs[FREG(B7_4)]);
1110 break;
1111 case 0xf001: /* fsub Rm,Rn */
1112 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1113 cpu_fregs[FREG(B11_8)],
1114 cpu_fregs[FREG(B7_4)]);
1115 break;
1116 case 0xf002: /* fmul Rm,Rn */
1117 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1118 cpu_fregs[FREG(B11_8)],
1119 cpu_fregs[FREG(B7_4)]);
1120 break;
1121 case 0xf003: /* fdiv Rm,Rn */
1122 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1123 cpu_fregs[FREG(B11_8)],
1124 cpu_fregs[FREG(B7_4)]);
1125 break;
1126 case 0xf004: /* fcmp/eq Rm,Rn */
1127 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1128 cpu_fregs[FREG(B7_4)]);
1129 return;
1130 case 0xf005: /* fcmp/gt Rm,Rn */
1131 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1132 cpu_fregs[FREG(B7_4)]);
1133 return;
1137 return;
1138 case 0xf00e: /* fmac FR0,RM,Rn */
1140 CHECK_FPU_ENABLED
1141 if (ctx->flags & FPSCR_PR) {
1142 break; /* illegal instruction */
1143 } else {
1144 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1145 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1146 cpu_fregs[FREG(B11_8)]);
1147 return;
1152 switch (ctx->opcode & 0xff00) {
1153 case 0xc900: /* and #imm,R0 */
1154 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1155 return;
1156 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1158 TCGv addr, val;
1159 addr = tcg_temp_new();
1160 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1161 val = tcg_temp_new();
1162 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1163 tcg_gen_andi_i32(val, val, B7_0);
1164 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1165 tcg_temp_free(val);
1166 tcg_temp_free(addr);
1168 return;
1169 case 0x8b00: /* bf label */
1170 CHECK_NOT_DELAY_SLOT
1171 gen_conditional_jump(ctx, ctx->pc + 2,
1172 ctx->pc + 4 + B7_0s * 2);
1173 ctx->bstate = BS_BRANCH;
1174 return;
1175 case 0x8f00: /* bf/s label */
1176 CHECK_NOT_DELAY_SLOT
1177 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1178 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1179 return;
1180 case 0x8900: /* bt label */
1181 CHECK_NOT_DELAY_SLOT
1182 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1183 ctx->pc + 2);
1184 ctx->bstate = BS_BRANCH;
1185 return;
1186 case 0x8d00: /* bt/s label */
1187 CHECK_NOT_DELAY_SLOT
1188 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1189 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1190 return;
1191 case 0x8800: /* cmp/eq #imm,R0 */
1192 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1193 return;
1194 case 0xc400: /* mov.b @(disp,GBR),R0 */
1196 TCGv addr = tcg_temp_new();
1197 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1198 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1199 tcg_temp_free(addr);
1201 return;
1202 case 0xc500: /* mov.w @(disp,GBR),R0 */
1204 TCGv addr = tcg_temp_new();
1205 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1206 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1207 tcg_temp_free(addr);
1209 return;
1210 case 0xc600: /* mov.l @(disp,GBR),R0 */
1212 TCGv addr = tcg_temp_new();
1213 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1214 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1215 tcg_temp_free(addr);
1217 return;
1218 case 0xc000: /* mov.b R0,@(disp,GBR) */
1220 TCGv addr = tcg_temp_new();
1221 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1222 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1223 tcg_temp_free(addr);
1225 return;
1226 case 0xc100: /* mov.w R0,@(disp,GBR) */
1228 TCGv addr = tcg_temp_new();
1229 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1230 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1231 tcg_temp_free(addr);
1233 return;
1234 case 0xc200: /* mov.l R0,@(disp,GBR) */
1236 TCGv addr = tcg_temp_new();
1237 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1238 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1239 tcg_temp_free(addr);
1241 return;
1242 case 0x8000: /* mov.b R0,@(disp,Rn) */
1244 TCGv addr = tcg_temp_new();
1245 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1246 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1247 tcg_temp_free(addr);
1249 return;
1250 case 0x8100: /* mov.w R0,@(disp,Rn) */
1252 TCGv addr = tcg_temp_new();
1253 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1254 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1255 tcg_temp_free(addr);
1257 return;
1258 case 0x8400: /* mov.b @(disp,Rn),R0 */
1260 TCGv addr = tcg_temp_new();
1261 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1262 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1263 tcg_temp_free(addr);
1265 return;
1266 case 0x8500: /* mov.w @(disp,Rn),R0 */
1268 TCGv addr = tcg_temp_new();
1269 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1270 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1271 tcg_temp_free(addr);
1273 return;
1274 case 0xc700: /* mova @(disp,PC),R0 */
1275 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1276 return;
1277 case 0xcb00: /* or #imm,R0 */
1278 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1279 return;
1280 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1282 TCGv addr, val;
1283 addr = tcg_temp_new();
1284 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1285 val = tcg_temp_new();
1286 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1287 tcg_gen_ori_i32(val, val, B7_0);
1288 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1289 tcg_temp_free(val);
1290 tcg_temp_free(addr);
1292 return;
1293 case 0xc300: /* trapa #imm */
1295 TCGv imm;
1296 CHECK_NOT_DELAY_SLOT
1297 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1298 imm = tcg_const_i32(B7_0);
1299 gen_helper_trapa(cpu_env, imm);
1300 tcg_temp_free(imm);
1301 ctx->bstate = BS_BRANCH;
1303 return;
1304 case 0xc800: /* tst #imm,R0 */
1306 TCGv val = tcg_temp_new();
1307 tcg_gen_andi_i32(val, REG(0), B7_0);
1308 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1309 tcg_temp_free(val);
1311 return;
1312 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1314 TCGv val = tcg_temp_new();
1315 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1316 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1317 tcg_gen_andi_i32(val, val, B7_0);
1318 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1319 tcg_temp_free(val);
1321 return;
1322 case 0xca00: /* xor #imm,R0 */
1323 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1324 return;
1325 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1327 TCGv addr, val;
1328 addr = tcg_temp_new();
1329 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1330 val = tcg_temp_new();
1331 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1332 tcg_gen_xori_i32(val, val, B7_0);
1333 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1334 tcg_temp_free(val);
1335 tcg_temp_free(addr);
1337 return;
1340 switch (ctx->opcode & 0xf08f) {
1341 case 0x408e: /* ldc Rm,Rn_BANK */
1342 CHECK_PRIVILEGED
1343 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1344 return;
1345 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1346 CHECK_PRIVILEGED
1347 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1348 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1349 return;
1350 case 0x0082: /* stc Rm_BANK,Rn */
1351 CHECK_PRIVILEGED
1352 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1353 return;
1354 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1355 CHECK_PRIVILEGED
1357 TCGv addr = tcg_temp_new();
1358 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1359 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1360 tcg_gen_mov_i32(REG(B11_8), addr);
1361 tcg_temp_free(addr);
1363 return;
1366 switch (ctx->opcode & 0xf0ff) {
1367 case 0x0023: /* braf Rn */
1368 CHECK_NOT_DELAY_SLOT
1369 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1370 ctx->flags |= DELAY_SLOT;
1371 ctx->delayed_pc = (uint32_t) - 1;
1372 return;
1373 case 0x0003: /* bsrf Rn */
1374 CHECK_NOT_DELAY_SLOT
1375 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1376 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1377 ctx->flags |= DELAY_SLOT;
1378 ctx->delayed_pc = (uint32_t) - 1;
1379 return;
1380 case 0x4015: /* cmp/pl Rn */
1381 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1382 return;
1383 case 0x4011: /* cmp/pz Rn */
1384 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1385 return;
1386 case 0x4010: /* dt Rn */
1387 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1388 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1389 return;
1390 case 0x402b: /* jmp @Rn */
1391 CHECK_NOT_DELAY_SLOT
1392 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1393 ctx->flags |= DELAY_SLOT;
1394 ctx->delayed_pc = (uint32_t) - 1;
1395 return;
1396 case 0x400b: /* jsr @Rn */
1397 CHECK_NOT_DELAY_SLOT
1398 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1399 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1400 ctx->flags |= DELAY_SLOT;
1401 ctx->delayed_pc = (uint32_t) - 1;
1402 return;
1403 case 0x400e: /* ldc Rm,SR */
1404 CHECK_PRIVILEGED
1406 TCGv val = tcg_temp_new();
1407 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1408 gen_write_sr(val);
1409 tcg_temp_free(val);
1410 ctx->bstate = BS_STOP;
1412 return;
1413 case 0x4007: /* ldc.l @Rm+,SR */
1414 CHECK_PRIVILEGED
1416 TCGv val = tcg_temp_new();
1417 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1418 tcg_gen_andi_i32(val, val, 0x700083f3);
1419 gen_write_sr(val);
1420 tcg_temp_free(val);
1421 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1422 ctx->bstate = BS_STOP;
1424 return;
1425 case 0x0002: /* stc SR,Rn */
1426 CHECK_PRIVILEGED
1427 gen_read_sr(REG(B11_8));
1428 return;
1429 case 0x4003: /* stc SR,@-Rn */
1430 CHECK_PRIVILEGED
1432 TCGv addr = tcg_temp_new();
1433 TCGv val = tcg_temp_new();
1434 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1435 gen_read_sr(val);
1436 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1437 tcg_gen_mov_i32(REG(B11_8), addr);
1438 tcg_temp_free(val);
1439 tcg_temp_free(addr);
1441 return;
1442 #define LD(reg,ldnum,ldpnum,prechk) \
1443 case ldnum: \
1444 prechk \
1445 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1446 return; \
1447 case ldpnum: \
1448 prechk \
1449 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1450 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1451 return;
1452 #define ST(reg,stnum,stpnum,prechk) \
1453 case stnum: \
1454 prechk \
1455 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1456 return; \
1457 case stpnum: \
1458 prechk \
1460 TCGv addr = tcg_temp_new(); \
1461 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1462 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1463 tcg_gen_mov_i32(REG(B11_8), addr); \
1464 tcg_temp_free(addr); \
1466 return;
1467 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1468 LD(reg,ldnum,ldpnum,prechk) \
1469 ST(reg,stnum,stpnum,prechk)
1470 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1471 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1472 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1473 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1474 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1475 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1476 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1477 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1478 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1479 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1480 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1481 case 0x406a: /* lds Rm,FPSCR */
1482 CHECK_FPU_ENABLED
1483 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1484 ctx->bstate = BS_STOP;
1485 return;
1486 case 0x4066: /* lds.l @Rm+,FPSCR */
1487 CHECK_FPU_ENABLED
1489 TCGv addr = tcg_temp_new();
1490 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1491 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1492 gen_helper_ld_fpscr(cpu_env, addr);
1493 tcg_temp_free(addr);
1494 ctx->bstate = BS_STOP;
1496 return;
1497 case 0x006a: /* sts FPSCR,Rn */
1498 CHECK_FPU_ENABLED
1499 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1500 return;
1501 case 0x4062: /* sts FPSCR,@-Rn */
1502 CHECK_FPU_ENABLED
1504 TCGv addr, val;
1505 val = tcg_temp_new();
1506 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1507 addr = tcg_temp_new();
1508 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1509 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1510 tcg_gen_mov_i32(REG(B11_8), addr);
1511 tcg_temp_free(addr);
1512 tcg_temp_free(val);
1514 return;
1515 case 0x00c3: /* movca.l R0,@Rm */
1517 TCGv val = tcg_temp_new();
1518 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1519 gen_helper_movcal(cpu_env, REG(B11_8), val);
1520 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1522 ctx->has_movcal = 1;
1523 return;
1524 case 0x40a9:
1525 /* MOVUA.L @Rm,R0 (Rm) -> R0
1526 Load non-boundary-aligned data */
1527 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1528 return;
1529 case 0x40e9:
1530 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1531 Load non-boundary-aligned data */
1532 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1533 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1534 return;
1535 case 0x0029: /* movt Rn */
1536 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1537 return;
1538 case 0x0073:
1539 /* MOVCO.L
1540 LDST -> T
1541 If (T == 1) R0 -> (Rn)
1542 0 -> LDST
1544 if (ctx->features & SH_FEATURE_SH4A) {
1545 TCGLabel *label = gen_new_label();
1546 tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
1547 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1548 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1549 gen_set_label(label);
1550 tcg_gen_movi_i32(cpu_ldst, 0);
1551 return;
1552 } else
1553 break;
1554 case 0x0063:
1555 /* MOVLI.L @Rm,R0
1556 1 -> LDST
1557 (Rm) -> R0
1558 When interrupt/exception
1559 occurred 0 -> LDST
1561 if (ctx->features & SH_FEATURE_SH4A) {
1562 tcg_gen_movi_i32(cpu_ldst, 0);
1563 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1564 tcg_gen_movi_i32(cpu_ldst, 1);
1565 return;
1566 } else
1567 break;
1568 case 0x0093: /* ocbi @Rn */
1570 gen_helper_ocbi(cpu_env, REG(B11_8));
1572 return;
1573 case 0x00a3: /* ocbp @Rn */
1574 case 0x00b3: /* ocbwb @Rn */
1575 /* These instructions are supposed to do nothing in case of
1576 a cache miss. Given that we only partially emulate caches
1577 it is safe to simply ignore them. */
1578 return;
1579 case 0x0083: /* pref @Rn */
1580 return;
1581 case 0x00d3: /* prefi @Rn */
1582 if (ctx->features & SH_FEATURE_SH4A)
1583 return;
1584 else
1585 break;
1586 case 0x00e3: /* icbi @Rn */
1587 if (ctx->features & SH_FEATURE_SH4A)
1588 return;
1589 else
1590 break;
1591 case 0x00ab: /* synco */
1592 if (ctx->features & SH_FEATURE_SH4A)
1593 return;
1594 else
1595 break;
1596 case 0x4024: /* rotcl Rn */
1598 TCGv tmp = tcg_temp_new();
1599 tcg_gen_mov_i32(tmp, cpu_sr_t);
1600 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1601 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1602 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1603 tcg_temp_free(tmp);
1605 return;
1606 case 0x4025: /* rotcr Rn */
1608 TCGv tmp = tcg_temp_new();
1609 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1610 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1611 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1612 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1613 tcg_temp_free(tmp);
1615 return;
1616 case 0x4004: /* rotl Rn */
1617 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1618 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1619 return;
1620 case 0x4005: /* rotr Rn */
1621 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1622 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1623 return;
1624 case 0x4000: /* shll Rn */
1625 case 0x4020: /* shal Rn */
1626 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1627 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1628 return;
1629 case 0x4021: /* shar Rn */
1630 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1631 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1632 return;
1633 case 0x4001: /* shlr Rn */
1634 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1635 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1636 return;
1637 case 0x4008: /* shll2 Rn */
1638 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1639 return;
1640 case 0x4018: /* shll8 Rn */
1641 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1642 return;
1643 case 0x4028: /* shll16 Rn */
1644 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1645 return;
1646 case 0x4009: /* shlr2 Rn */
1647 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1648 return;
1649 case 0x4019: /* shlr8 Rn */
1650 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1651 return;
1652 case 0x4029: /* shlr16 Rn */
1653 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1654 return;
1655 case 0x401b: /* tas.b @Rn */
1657 TCGv addr, val;
1658 addr = tcg_temp_local_new();
1659 tcg_gen_mov_i32(addr, REG(B11_8));
1660 val = tcg_temp_local_new();
1661 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1662 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1663 tcg_gen_ori_i32(val, val, 0x80);
1664 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1665 tcg_temp_free(val);
1666 tcg_temp_free(addr);
1668 return;
1669 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1670 CHECK_FPU_ENABLED
1671 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1672 return;
1673 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1674 CHECK_FPU_ENABLED
1675 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1676 return;
1677 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1678 CHECK_FPU_ENABLED
1679 if (ctx->flags & FPSCR_PR) {
1680 TCGv_i64 fp;
1681 if (ctx->opcode & 0x0100)
1682 break; /* illegal instruction */
1683 fp = tcg_temp_new_i64();
1684 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1685 gen_store_fpr64(fp, DREG(B11_8));
1686 tcg_temp_free_i64(fp);
1688 else {
1689 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1691 return;
1692 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1693 CHECK_FPU_ENABLED
1694 if (ctx->flags & FPSCR_PR) {
1695 TCGv_i64 fp;
1696 if (ctx->opcode & 0x0100)
1697 break; /* illegal instruction */
1698 fp = tcg_temp_new_i64();
1699 gen_load_fpr64(fp, DREG(B11_8));
1700 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1701 tcg_temp_free_i64(fp);
1703 else {
1704 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1706 return;
1707 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1708 CHECK_FPU_ENABLED
1710 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1712 return;
1713 case 0xf05d: /* fabs FRn/DRn */
1714 CHECK_FPU_ENABLED
1715 if (ctx->flags & FPSCR_PR) {
1716 if (ctx->opcode & 0x0100)
1717 break; /* illegal instruction */
1718 TCGv_i64 fp = tcg_temp_new_i64();
1719 gen_load_fpr64(fp, DREG(B11_8));
1720 gen_helper_fabs_DT(fp, fp);
1721 gen_store_fpr64(fp, DREG(B11_8));
1722 tcg_temp_free_i64(fp);
1723 } else {
1724 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1726 return;
1727 case 0xf06d: /* fsqrt FRn */
1728 CHECK_FPU_ENABLED
1729 if (ctx->flags & FPSCR_PR) {
1730 if (ctx->opcode & 0x0100)
1731 break; /* illegal instruction */
1732 TCGv_i64 fp = tcg_temp_new_i64();
1733 gen_load_fpr64(fp, DREG(B11_8));
1734 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1735 gen_store_fpr64(fp, DREG(B11_8));
1736 tcg_temp_free_i64(fp);
1737 } else {
1738 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1739 cpu_fregs[FREG(B11_8)]);
1741 return;
1742 case 0xf07d: /* fsrra FRn */
1743 CHECK_FPU_ENABLED
1744 break;
1745 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1746 CHECK_FPU_ENABLED
1747 if (!(ctx->flags & FPSCR_PR)) {
1748 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1750 return;
1751 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1752 CHECK_FPU_ENABLED
1753 if (!(ctx->flags & FPSCR_PR)) {
1754 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1756 return;
1757 case 0xf0ad: /* fcnvsd FPUL,DRn */
1758 CHECK_FPU_ENABLED
1760 TCGv_i64 fp = tcg_temp_new_i64();
1761 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1762 gen_store_fpr64(fp, DREG(B11_8));
1763 tcg_temp_free_i64(fp);
1765 return;
1766 case 0xf0bd: /* fcnvds DRn,FPUL */
1767 CHECK_FPU_ENABLED
1769 TCGv_i64 fp = tcg_temp_new_i64();
1770 gen_load_fpr64(fp, DREG(B11_8));
1771 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1772 tcg_temp_free_i64(fp);
1774 return;
1775 case 0xf0ed: /* fipr FVm,FVn */
1776 CHECK_FPU_ENABLED
1777 if ((ctx->flags & FPSCR_PR) == 0) {
1778 TCGv m, n;
1779 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1780 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1781 gen_helper_fipr(cpu_env, m, n);
1782 tcg_temp_free(m);
1783 tcg_temp_free(n);
1784 return;
1786 break;
1787 case 0xf0fd: /* ftrv XMTRX,FVn */
1788 CHECK_FPU_ENABLED
1789 if ((ctx->opcode & 0x0300) == 0x0100 &&
1790 (ctx->flags & FPSCR_PR) == 0) {
1791 TCGv n;
1792 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1793 gen_helper_ftrv(cpu_env, n);
1794 tcg_temp_free(n);
1795 return;
1797 break;
1799 #if 0
1800 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1801 ctx->opcode, ctx->pc);
1802 fflush(stderr);
1803 #endif
1804 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1805 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1806 gen_helper_raise_slot_illegal_instruction(cpu_env);
1807 } else {
1808 gen_helper_raise_illegal_instruction(cpu_env);
1810 ctx->bstate = BS_BRANCH;
1813 static void decode_opc(DisasContext * ctx)
1815 uint32_t old_flags = ctx->flags;
1817 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
1818 tcg_gen_debug_insn_start(ctx->pc);
1821 _decode_opc(ctx);
1823 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1824 if (ctx->flags & DELAY_SLOT_CLEARME) {
1825 gen_store_flags(0);
1826 } else {
1827 /* go out of the delay slot */
1828 uint32_t new_flags = ctx->flags;
1829 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1830 gen_store_flags(new_flags);
1832 ctx->flags = 0;
1833 ctx->bstate = BS_BRANCH;
1834 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1835 gen_delayed_conditional_jump(ctx);
1836 } else if (old_flags & DELAY_SLOT) {
1837 gen_jump(ctx);
1842 /* go into a delay slot */
1843 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1844 gen_store_flags(ctx->flags);
1847 static inline void
1848 gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
1849 bool search_pc)
1851 CPUState *cs = CPU(cpu);
1852 CPUSH4State *env = &cpu->env;
1853 DisasContext ctx;
1854 target_ulong pc_start;
1855 CPUBreakpoint *bp;
1856 int i, ii;
1857 int num_insns;
1858 int max_insns;
1860 pc_start = tb->pc;
1861 ctx.pc = pc_start;
1862 ctx.flags = (uint32_t)tb->flags;
1863 ctx.bstate = BS_NONE;
1864 ctx.memidx = (ctx.flags & (1u << SR_MD)) == 0 ? 1 : 0;
1865 /* We don't know if the delayed pc came from a dynamic or static branch,
1866 so assume it is a dynamic branch. */
1867 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1868 ctx.tb = tb;
1869 ctx.singlestep_enabled = cs->singlestep_enabled;
1870 ctx.features = env->features;
1871 ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);
1873 ii = -1;
1874 num_insns = 0;
1875 max_insns = tb->cflags & CF_COUNT_MASK;
1876 if (max_insns == 0)
1877 max_insns = CF_COUNT_MASK;
1878 gen_tb_start(tb);
1879 while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
1880 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
1881 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
1882 if (ctx.pc == bp->pc) {
1883 /* We have hit a breakpoint - make sure PC is up-to-date */
1884 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1885 gen_helper_debug(cpu_env);
1886 ctx.bstate = BS_BRANCH;
1887 break;
1891 if (search_pc) {
1892 i = tcg_op_buf_count();
1893 if (ii < i) {
1894 ii++;
1895 while (ii < i)
1896 tcg_ctx.gen_opc_instr_start[ii++] = 0;
1898 tcg_ctx.gen_opc_pc[ii] = ctx.pc;
1899 gen_opc_hflags[ii] = ctx.flags;
1900 tcg_ctx.gen_opc_instr_start[ii] = 1;
1901 tcg_ctx.gen_opc_icount[ii] = num_insns;
1903 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1904 gen_io_start();
1905 #if 0
1906 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1907 fflush(stderr);
1908 #endif
1909 ctx.opcode = cpu_lduw_code(env, ctx.pc);
1910 decode_opc(&ctx);
1911 num_insns++;
1912 ctx.pc += 2;
1913 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1914 break;
1915 if (cs->singlestep_enabled) {
1916 break;
1918 if (num_insns >= max_insns)
1919 break;
1920 if (singlestep)
1921 break;
1923 if (tb->cflags & CF_LAST_IO)
1924 gen_io_end();
1925 if (cs->singlestep_enabled) {
1926 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1927 gen_helper_debug(cpu_env);
1928 } else {
1929 switch (ctx.bstate) {
1930 case BS_STOP:
1931 /* gen_op_interrupt_restart(); */
1932 /* fall through */
1933 case BS_NONE:
1934 if (ctx.flags) {
1935 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1937 gen_goto_tb(&ctx, 0, ctx.pc);
1938 break;
1939 case BS_EXCP:
1940 /* gen_op_interrupt_restart(); */
1941 tcg_gen_exit_tb(0);
1942 break;
1943 case BS_BRANCH:
1944 default:
1945 break;
1949 gen_tb_end(tb, num_insns);
1951 if (search_pc) {
1952 i = tcg_op_buf_count();
1953 ii++;
1954 while (ii <= i)
1955 tcg_ctx.gen_opc_instr_start[ii++] = 0;
1956 } else {
1957 tb->size = ctx.pc - pc_start;
1958 tb->icount = num_insns;
1961 #ifdef DEBUG_DISAS
1962 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1963 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1964 log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
1965 qemu_log("\n");
1967 #endif
1970 void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
1972 gen_intermediate_code_internal(sh_env_get_cpu(env), tb, false);
1975 void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
1977 gen_intermediate_code_internal(sh_env_get_cpu(env), tb, true);
1980 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
1982 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
1983 env->flags = gen_opc_hflags[pc_pos];