target/sh4/translate.c
/*
 *  SH4 translation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#define DEBUG_DISAS

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc;
    uint16_t opcode;
    uint32_t tbflags;    /* should stay unmodified during the TB translation */
    uint32_t envflags;   /* should stay in sync with env->flags using TCG ops */
    int bstate;
    int memidx;
    uint32_t delayed_pc;
    int singlestep_enabled;
    uint32_t features;
    int has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#endif

enum {
    BS_NONE   = 0, /* We go out of the TB without reaching a branch or an
                    * exception condition */
    BS_STOP   = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP   = 3, /* We reached an exception condition */
};

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_gregs[24];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

#include "exec/gen-icount.h"

void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 24; i++)
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_ldst = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);

    done_init = 1;
}

void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;
    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_RTE) {
        cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    }
}

static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    /* Reassemble SR: start from the bits kept in cpu_sr, then OR in the
       separately stored Q, M and T bits (the original code ORed into the
       uninitialized dst and then overwrote it, losing Q and M).  */
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_temp_free_i32(t0);
}

static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}
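
/* Note: SR is kept split across four TCG globals -- cpu_sr holds every bit
   except Q, M and T, which live in cpu_sr_q, cpu_sr_m and cpu_sr_t -- so the
   flag bits that most instructions touch can be read and written without
   masking.  gen_read_sr()/gen_write_sr() above are the only places that
   reassemble or scatter the architectural 32-bit value. */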

static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->pc);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & DELAY_SLOT_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* Use a direct jump if in same page and singlestep not enabled */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled)
            gen_helper_debug(cpu_env);
        tcg_gen_exit_tb(0);
    }
}

static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == (uint32_t) - 1) {
        /* Target is not statically known; it necessarily comes from a
           delayed jump, as immediate jumps are conditional jumps */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (ctx->singlestep_enabled)
            gen_helper_debug(cpu_env);
        tcg_gen_exit_tb(0);
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext * ctx,
                                 target_ulong ift, target_ulong ifnott)
{
    TCGLabel *l1 = gen_new_label();
    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, ifnott);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ift);
    ctx->bstate = BS_BRANCH;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1;
    TCGv ds;

    l1 = gen_new_label();
    ds = tcg_temp_new();
    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}

/* A double-precision value occupies two adjacent single-precision
   registers; the even-numbered register holds the high 32 bits.  */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}

#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

#define REG(x) ((x) < 8 && (ctx->tbflags & (1u << SR_MD))\
                && (ctx->tbflags & (1u << SR_RB))\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (!(ctx->tbflags & (1u << SR_MD))\
                   || !(ctx->tbflags & (1u << SR_RB)))\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define FREG(x) (ctx->tbflags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->tbflags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
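
/* REG selects the currently visible general register file: in privileged
   mode with SR.RB set, R0-R7 map onto the second bank (gregs[16..23]);
   ALTREG names whichever bank is *not* currently selected (used by the
   ldc/stc Rn_BANK forms).  XHACK turns the 4-bit register field of an fmov
   executed with FPSCR.SZ=1 into an fregs[] index: the low bit of the field
   selects the XD (other-bank) pair, so it is moved up to bit 4 (+16). */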

#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & DELAY_SLOT_MASK) {                           \
        gen_save_cpu_state(ctx, true);                               \
        gen_helper_raise_slot_illegal_instruction(cpu_env);          \
        ctx->bstate = BS_EXCP;                                       \
        return;                                                      \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                                              \
        gen_save_cpu_state(ctx, true);                               \
        if (ctx->envflags & DELAY_SLOT_MASK) {                       \
            gen_helper_raise_slot_illegal_instruction(cpu_env);      \
        } else {                                                     \
            gen_helper_raise_illegal_instruction(cpu_env);           \
        }                                                            \
        ctx->bstate = BS_EXCP;                                       \
        return;                                                      \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {                              \
        gen_save_cpu_state(ctx, true);                               \
        if (ctx->envflags & DELAY_SLOT_MASK) {                       \
            gen_helper_raise_slot_fpu_disable(cpu_env);              \
        } else {                                                     \
            gen_helper_raise_fpu_disable(cpu_env);                   \
        }                                                            \
        ctx->bstate = BS_EXCP;                                       \
        return;                                                      \
    }

static void _decode_opc(DisasContext * ctx)
{
    /* This code tries to make movcal emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
         to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movcal.l + ocbi combination is used
         to flush the cache.  Here, the data written by movcal.l is never
         written to memory, and the data written is just bogus.

       To simulate this, we simulate movcal.l: we store the value to memory,
       but we also remember the previous content.  If we see ocbi, we check
       if movcal.l for that address was done previously.  If so, the write
       should not have hit the memory, so we restore the previous content.
       When we see an instruction that is neither movca.l
       nor ocbi, the previous content is discarded.

       To optimize, we only try to flush stores when we're at the start of
       the TB, or if we already saw movca.l in this TB and did not flush
       stores yet.  */
    if (ctx->has_movcal)
    {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093 /* ocbi */
            && opcode != 0x00c3 /* movca.l */)
        {
            gen_helper_discard_movcal_backup(cpu_env);
            ctx->has_movcal = 0;
        }
    }

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019: /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b: /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x0028: /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048: /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008: /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038: /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(cpu_env);
        return;
    case 0x002b: /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= DELAY_SLOT_RTE;
        ctx->delayed_pc = (uint32_t) - 1;
        ctx->bstate = BS_STOP;
        return;
    case 0x0058: /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018: /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd: /* frchg */
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->bstate = BS_STOP;
        return;
    case 0xf3fd: /* fschg */
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->bstate = BS_STOP;
        return;
    case 0x0009: /* nop */
        return;
    case 0x001b: /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
        gen_helper_sleep(cpu_env);
        return;
    }
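
    /* The decoder works through progressively weaker masks: exact 16-bit
       opcodes above, then the 0xf000, 0xf00f, 0xff00, 0xf08f and 0xf0ff
       groups below.  An opcode that falls through every switch reaches the
       illegal-instruction epilogue at the end of this function. */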

    switch (ctx->opcode & 0xf000) {
    case 0x1000: /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x5000: /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xe000: /* mov #imm,Rn */
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
    case 0x9000: /* mov.w @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xd000: /* mov.l @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x7000: /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000: /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    case 0xb000: /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    }

    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000: /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001: /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
        return;
    case 0x2002: /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        return;
    case 0x6000: /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001: /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        return;
    case 0x6002: /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        return;
    case 0x2004: /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
            tcg_temp_free(addr);
        }
        return;
    case 0x2005: /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x2006: /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
        }
        return;
    case 0x6004: /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if ( B11_8 != B7_4 )
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        return;
    case 0x6005: /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        if ( B11_8 != B7_4 )
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        return;
    case 0x6006: /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        if ( B11_8 != B7_4 )
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        return;
    case 0x0004: /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x0005: /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x0006: /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x000c: /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x000d: /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0x000e: /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x6008: /* swap.b Rm,Rn */
        {
            TCGv low = tcg_temp_new();
            tcg_gen_ext16u_i32(low, REG(B7_4));
            tcg_gen_bswap16_i32(low, low);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
            tcg_temp_free(low);
        }
        return;
    case 0x6009: /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d: /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
            tcg_temp_free(low);
            tcg_temp_free(high);
        }
        return;
    case 0x300c: /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e: /* addc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300f: /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            tcg_temp_free(t1);
            /* addv writes its result to Rn (the original stored to Rm) */
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2009: /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000: /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003: /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007: /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006: /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002: /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c: /* cmp/str Rm,Rn */
        {
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
            tcg_temp_free(cmp2);
            tcg_temp_free(cmp1);
        }
        return;
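        /* cmp/str sets T when any of the four byte lanes of Rm and Rn are
           equal: a lane of cmp2 = Rm ^ Rn is zero iff the bytes match, and
           (x - 0x01010101) & ~x & 0x80808080 is the classic "word contains
           a zero byte" bit trick. */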
    case 0x2007: /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);  /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
        return;
    case 0x3004: /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_const_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
               using 64-bit temps, we compute arg0's high part from q ^ m, so
               that it is 0x00000000 when adding the value or 0xffffffff when
               subtracting it. */
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);

            tcg_temp_free(zero);
            tcg_temp_free(t2);
            tcg_temp_free(t1);
            tcg_temp_free(t0);
        }
        return;
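        /* div0u/div0s above initialise Q, M and T; repeating div1 then
           performs non-restoring division one bit per step, with each
           quotient bit delivered through T (see the division sequences in
           the SH-4 programming manual). */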
    case 0x300d: /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005: /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e: /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f: /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c: /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d: /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f: /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macl(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f: /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macw(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007: /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f: /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x200e: /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x600b: /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a: /* negc Rm,Rn */
        {
            TCGv t0 = tcg_const_i32(0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
        }
        return;
    case 0x6007: /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b: /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c: /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x400d: /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x3008: /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a: /* subc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300b: /* subv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
            /* overflow = (Rn ^ Rm) & (Rn ^ result), bit 31; the original
               xored the result with Rm, which never flags an overflow */
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, t1, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2008: /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0x200a: /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, XREG(B7_4));
            gen_store_fpr64(fp, XREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XREG(B7_4);
            tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
                                ctx->memidx, MO_TEUL);
            tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
                                ctx->memidx, MO_TEUL);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
                                ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XREG(B11_8);
            tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
                                ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XREG(B11_8);
            tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
                                ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        TCGv addr = tcg_temp_new_i32();
        tcg_gen_subi_i32(addr, REG(B11_8), 4);
        if (ctx->tbflags & FPSCR_SZ) {
            int fr = XREG(B7_4);
            tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
            tcg_gen_subi_i32(addr, addr, 4);
            tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
        } else {
            tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
                                ctx->memidx, MO_TEUL);
        }
        tcg_gen_mov_i32(REG(B11_8), addr);
        tcg_temp_free(addr);
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                int fr = XREG(B11_8);
                tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
                                    ctx->memidx, MO_TEUL);
                tcg_gen_addi_i32(addr, addr, 4);
                tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
                                    ctx->memidx, MO_TEUL);
            } else {
                tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
                                    ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                int fr = XREG(B7_4);
                /* This is a store; the SZ=1 path must use qemu_st for both
                   halves of the register pair (the original emitted loads) */
                tcg_gen_qemu_st_i32(cpu_fregs[fr], addr,
                                    ctx->memidx, MO_TEUL);
                tcg_gen_addi_i32(addr, addr, 4);
                tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr,
                                    ctx->memidx, MO_TEUL);
            } else {
                tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
                                    ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

                if (ctx->opcode & 0x0110)
                    break; /* illegal instruction */
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(fp0, DREG(B11_8));
                gen_load_fpr64(fp1, DREG(B7_4));
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
                    return;
                }
                gen_store_fpr64(fp0, DREG(B11_8));
                tcg_temp_free_i64(fp0);
                tcg_temp_free_i64(fp1);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
                                          cpu_fregs[FREG(B7_4)]);
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
                                          cpu_fregs[FREG(B7_4)]);
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,RM,Rn */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                break; /* illegal instruction */
            } else {
                gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                   cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
                                   cpu_fregs[FREG(B11_8)]);
                return;
            }
        }
    }

    switch (ctx->opcode & 0xff00) {
    case 0xc900: /* and #imm,R0 */
        tcg_gen_andi_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcd00: /* and.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0x8b00: /* bf label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 2, ctx->pc + 4 + B7_0s * 2);
        return;
    case 0x8f00: /* bf/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
        return;
    case 0x8900: /* bt label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, ctx->pc + 2);
        return;
    case 0x8d00: /* bt/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
        return;
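        /* The two delayed conditional branches above latch the branch
           condition into cpu_delayed_cond; gen_delayed_conditional_jump()
           consumes it after the delay-slot instruction has been
           translated. */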
    case 0x8800: /* cmp/eq #imm,R0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
        return;
    case 0xc400: /* mov.b @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc500: /* mov.w @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc600: /* mov.l @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xc000: /* mov.b R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc100: /* mov.w R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc200: /* mov.l R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x8000: /* mov.b R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8100: /* mov.w R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x8400: /* mov.b @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8500: /* mov.w @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc700: /* mova @(disp,PC),R0 */
        tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
        return;
    case 0xcb00: /* or #imm,R0 */
        tcg_gen_ori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcf00: /* or.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_ori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0xc300: /* trapa #imm */
        {
            TCGv imm;
            CHECK_NOT_DELAY_SLOT
            gen_save_cpu_state(ctx, true);
            imm = tcg_const_i32(B7_0);
            gen_helper_trapa(cpu_env, imm);
            tcg_temp_free(imm);
            ctx->bstate = BS_EXCP;
        }
        return;
    case 0xc800: /* tst #imm,R0 */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(0), B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xcc00: /* tst.b #imm,@(R0,GBR) */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_add_i32(val, REG(0), cpu_gbr);
            tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xca00: /* xor #imm,R0 */
        tcg_gen_xori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xce00: /* xor.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_xori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf08f) {
    case 0x408e: /* ldc Rm,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
        return;
    case 0x4087: /* ldc.l @Rm+,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0082: /* stc Rm_BANK,Rn */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
        return;
    case 0x4083: /* stc.l Rm_BANK,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf0ff) {
    case 0x0023: /* braf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x0003: /* bsrf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x4015: /* cmp/pl Rn */
        tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4011: /* cmp/pz Rn */
        tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4010: /* dt Rn */
        tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x402b: /* jmp @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x400b: /* jsr @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x400e: /* ldc Rm,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x4007: /* ldc.l @Rm+,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_andi_i32(val, val, 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x0002: /* stc SR,Rn */
        CHECK_PRIVILEGED
        gen_read_sr(REG(B11_8));
        return;
    case 0x4003: /* stc SR,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            TCGv val = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            gen_read_sr(val);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
#define LD(reg,ldnum,ldpnum,prechk) \
  case ldnum:                                                        \
    prechk                                                           \
    tcg_gen_mov_i32 (cpu_##reg, REG(B11_8));                         \
    return;                                                          \
  case ldpnum:                                                       \
    prechk                                                           \
    tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
    tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);                     \
    return;
#define ST(reg,stnum,stpnum,prechk) \
  case stnum:                                                        \
    prechk                                                           \
    tcg_gen_mov_i32 (REG(B11_8), cpu_##reg);                         \
    return;                                                          \
  case stpnum:                                                       \
    prechk                                                           \
    {                                                                \
        TCGv addr = tcg_temp_new();                                  \
        tcg_gen_subi_i32(addr, REG(B11_8), 4);                       \
        tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL);  \
        tcg_gen_mov_i32(REG(B11_8), addr);                           \
        tcg_temp_free(addr);                                         \
    }                                                                \
    return;
#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
        LD(reg,ldnum,ldpnum,prechk)                                  \
        ST(reg,stnum,stpnum,prechk)
    LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
    LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
    LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
    LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
    ST(sgr,  0x003a, 0x4032, CHECK_PRIVILEGED)
    LD(sgr,  0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
    LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
    LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
    LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
    LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
    LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
    case 0x406a: /* lds Rm,FPSCR */
        CHECK_FPU_ENABLED
        gen_helper_ld_fpscr(cpu_env, REG(B11_8));
        ctx->bstate = BS_STOP;
        return;
    case 0x4066: /* lds.l @Rm+,FPSCR */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            gen_helper_ld_fpscr(cpu_env, addr);
            tcg_temp_free(addr);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x006a: /* sts FPSCR,Rn */
        CHECK_FPU_ENABLED
        tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
        return;
    case 0x4062: /* sts FPSCR,@-Rn */
        CHECK_FPU_ENABLED
        {
            TCGv addr, val;
            val = tcg_temp_new();
            tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
            addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
            tcg_temp_free(val);
        }
        return;
    case 0x00c3: /* movca.l R0,@Rm */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
            gen_helper_movcal(cpu_env, REG(B11_8), val);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
        }
        ctx->has_movcal = 1;
        return;
    case 0x40a9: /* movua.l @Rm,R0 */
        /* Load non-boundary-aligned data */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
                                MO_TEUL | MO_UNALN);
            return;
        }
        break;
    case 0x40e9: /* movua.l @Rm+,R0 */
        /* Load non-boundary-aligned data */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
                                MO_TEUL | MO_UNALN);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            return;
        }
        break;
    case 0x0029: /* movt Rn */
        tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
        return;
    case 0x0073:
        /* MOVCO.L
         *     LDST -> T
         *     If (T == 1) R0 -> (Rn)
         *     0 -> LDST
         */
        if (ctx->features & SH_FEATURE_SH4A) {
            TCGLabel *label = gen_new_label();
            tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
            tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
            gen_set_label(label);
            tcg_gen_movi_i32(cpu_ldst, 0);
            return;
        } else
            break;
    case 0x0063:
        /* MOVLI.L @Rm,R0
         *     1 -> LDST
         *     (Rm) -> R0
         *     When interrupt/exception
         *     occurred 0 -> LDST
         */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_movi_i32(cpu_ldst, 0);
            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_movi_i32(cpu_ldst, 1);
            return;
        } else
            break;
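        /* Note: the movli/movco pair is modelled here with the single
           cpu_ldst flag, which is only enough for a uniprocessor guest;
           there is no real load-locked/store-conditional interference
           detection. */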
    case 0x0093: /* ocbi @Rn */
        {
            gen_helper_ocbi(cpu_env, REG(B11_8));
        }
        return;
    case 0x00a3: /* ocbp @Rn */
    case 0x00b3: /* ocbwb @Rn */
        /* These instructions are supposed to do nothing in case of
           a cache miss. Given that we only partially emulate caches
           it is safe to simply ignore them. */
        return;
    case 0x0083: /* pref @Rn */
        return;
    case 0x00d3: /* prefi @Rn */
        if (ctx->features & SH_FEATURE_SH4A)
            return;
        else
            break;
    case 0x00e3: /* icbi @Rn */
        if (ctx->features & SH_FEATURE_SH4A)
            return;
        else
            break;
    case 0x00ab: /* synco */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            return;
        }
        break;
    case 0x4024: /* rotcl Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_mov_i32(tmp, cpu_sr_t);
            tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4025: /* rotcr Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
            tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
            tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4004: /* rotl Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
        /* T = the rotated-out MSB, now at bit 0 (the original masked with
           0, which cleared T unconditionally) */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        return;
    case 0x4005: /* rotr Rn */
        /* T = the rotated-out LSB, read before the rotate */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4000: /* shll Rn */
    case 0x4020: /* shal Rn */
        tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4021: /* shar Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4001: /* shlr Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4008: /* shll2 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4018: /* shll8 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4028: /* shll16 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
        return;
    case 0x4009: /* shlr2 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4019: /* shlr8 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4029: /* shlr16 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
        return;
    case 0x401b: /* tas.b @Rn */
        {
            TCGv val = tcg_const_i32(0x80);
            tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
                                        ctx->memidx, MO_UB);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
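        /* tas.b above atomically ORs 0x80 into the byte at @Rn; the fetched
           old value lands in val, and T is set iff that old value was
           zero. */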
    case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
        return;
    case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
        return;
    case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100)
                break; /* illegal instruction */
            fp = tcg_temp_new_i64();
            gen_helper_float_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        }
        else {
            gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
        }
        return;
    case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100)
                break; /* illegal instruction */
            fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        }
        else {
            gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf05d: /* fabs FRn/DRn */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            if (ctx->opcode & 0x0100)
                break; /* illegal instruction */
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_fabs_DT(fp, fp);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf06d: /* fsqrt FRn */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            if (ctx->opcode & 0x0100)
                break; /* illegal instruction */
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_fsqrt_DT(fp, cpu_env, fp);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf07d: /* fsrra FRn */
        CHECK_FPU_ENABLED
        break;
    case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        if (!(ctx->tbflags & FPSCR_PR)) {
            tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
        }
        return;
    case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        if (!(ctx->tbflags & FPSCR_PR)) {
            tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
        }
        return;
    case 0xf0ad: /* fcnvsd FPUL,DRn */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        }
        return;
    case 0xf0bd: /* fcnvds DRn,FPUL */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        }
        return;
    case 0xf0ed: /* fipr FVm,FVn */
        CHECK_FPU_ENABLED
        if ((ctx->tbflags & FPSCR_PR) == 0) {
            TCGv m, n;
            m = tcg_const_i32((ctx->opcode >> 8) & 3);
            n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_fipr(cpu_env, m, n);
            tcg_temp_free(m);
            tcg_temp_free(n);
            return;
        }
        break;
    case 0xf0fd: /* ftrv XMTRX,FVn */
        CHECK_FPU_ENABLED
        if ((ctx->opcode & 0x0300) == 0x0100 &&
            (ctx->tbflags & FPSCR_PR) == 0) {
            TCGv n;
            n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_ftrv(cpu_env, n);
            tcg_temp_free(n);
            return;
        }
        break;
    }
#if 0
    fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
            ctx->opcode, ctx->pc);
    fflush(stderr);
#endif
    gen_save_cpu_state(ctx, true);
    if (ctx->envflags & DELAY_SLOT_MASK) {
        gen_helper_raise_slot_illegal_instruction(cpu_env);
    } else {
        gen_helper_raise_illegal_instruction(cpu_env);
    }
    ctx->bstate = BS_EXCP;
}

static void decode_opc(DisasContext * ctx)
{
    uint32_t old_flags = ctx->envflags;

    _decode_opc(ctx);

    if (old_flags & DELAY_SLOT_MASK) {
        /* go out of the delay slot */
        ctx->envflags &= ~DELAY_SLOT_MASK;
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else {
            gen_jump(ctx);
        }
    }
}
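
/* Note: a branch instruction only sets a DELAY_SLOT* flag in ctx->envflags;
   the *next* call to decode_opc() translates the delay-slot instruction and,
   via the old_flags check above, then emits the latched (possibly
   conditional) jump. */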

void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
    SuperHCPU *cpu = sh_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.tbflags = (uint32_t)tb->flags;
    ctx.envflags = tb->flags & DELAY_SLOT_MASK;
    ctx.bstate = BS_NONE;
    ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
        tcg_gen_insn_start(ctx.pc, ctx.envflags);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            /* We have hit a breakpoint - make sure PC is up-to-date */
            gen_save_cpu_state(&ctx, true);
            gen_helper_debug(cpu_env);
            ctx.bstate = BS_EXCP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            ctx.pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        ctx.pc += 2;
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (cs->singlestep_enabled) {
            break;
        }
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (cs->singlestep_enabled) {
        gen_save_cpu_state(&ctx, true);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
            gen_save_cpu_state(&ctx, true);
            tcg_gen_exit_tb(0);
            break;
        case BS_NONE:
            gen_save_cpu_state(&ctx, false);
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* fall through */
        case BS_BRANCH:
        default:
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
}
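
/* Note: the two values recorded by tcg_gen_insn_start() above (pc and
   envflags) are what restore_state_to_opc() below receives as data[0] and
   data[1] when unwinding the CPU state after an exception. */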

void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->flags = data[1];
    /* Theoretically delayed_pc should also be restored. In practice the
       branch instruction is re-executed after exception, so the delayed
       branch target will be recomputed. */
}