block: acquire in bdrv_query_image_info
[qemu/ar7.git] / target-sh4 / translate.c
blob9de5659cf73f9f9e85fb379919cd63d6d863933c
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #define DEBUG_DISAS
22 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
/* Per-translation-block decoder state for the SH4 front end. */
typedef struct DisasContext {
    struct TranslationBlock *tb;   /* TB currently being translated */
    target_ulong pc;               /* guest PC of the insn being decoded */
    uint16_t opcode;               /* raw 16-bit opcode */
    uint32_t flags;                /* TB flags: SR bits and delay-slot state */
    int bstate;                    /* BS_* reason for stopping translation */
    int memidx;                    /* MMU index for qemu_ld/st ops */
    uint32_t delayed_pc;           /* static branch target, or (uint32_t)-1
                                      when the target is only known at
                                      run time (e.g. rts/rte) */
    int singlestep_enabled;        /* gdbstub single-stepping active */
    uint32_t features;             /* CPU feature bits — set by the caller,
                                      not visible in this chunk */
    int has_movcal;                /* movca.l backup state is live; see
                                      _decode_opc() for the flush logic */
} DisasContext;
/*
 * True when translating user-mode code, i.e. when privileged
 * instructions must raise an illegal-instruction exception.  A
 * user-only build always runs as user; otherwise test the SR.MD bit
 * captured in ctx->flags.
 */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
#endif
/* Values for DisasContext::bstate — why the translation loop stops. */
enum {
    BS_NONE = 0,   /* We go out of the TB without reaching a branch or an
                    * exception condition */
    BS_STOP = 1,   /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3,   /* We reached an exception condition */
};
/* global register indexes */
static TCGv_ptr cpu_env;
/* R0-R15; indexes 16-23 are the alternate R0_BANK1..R7_BANK1 copies */
static TCGv cpu_gregs[24];
/* SR is kept split: Q, M and T live in their own variables so that
   T-setting ops avoid read-modify-write on the full SR word */
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
/* FPR0-15 in both banks; bank selected via FREG()/XREG() below */
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;
74 #include "exec/gen-icount.h"
/* Register the fixed TCG globals that mirror CPUSH4State fields.
   Guarded by done_init so creating several CPUs registers them once. */
void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* General registers, including both R0-R7 banks. */
    for (i = 0; i < 24; i++)
        cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);

    /* Control and system registers. */
    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    /* Translator-internal state kept in env between TBs. */
    cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    /* Floating point registers, both banks. */
    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);

    done_init = 1;
}
/* Dump the CPU register state to F ("info registers" / debug logging). */
void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;
    /* SR is stored split (Q/M/T separate); cpu_read_sr() reassembles it. */
    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    /* Print the 24 general registers (both banks), four per line. */
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    }
}
183 static void gen_read_sr(TCGv dst)
185 TCGv t0 = tcg_temp_new();
186 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
187 tcg_gen_or_i32(dst, dst, t0);
188 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
189 tcg_gen_or_i32(dst, dst, t0);
190 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
191 tcg_gen_or_i32(dst, cpu_sr, t0);
192 tcg_temp_free_i32(t0);
/* Scatter a full SR value in SRC into the split representation:
   cpu_sr keeps all bits except Q, M and T, which are each extracted
   into their own single-bit variable. */
static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_shri_i32(cpu_sr_q, src, SR_Q);
    tcg_gen_andi_i32(cpu_sr_q, cpu_sr_q, 1);
    tcg_gen_shri_i32(cpu_sr_m, src, SR_M);
    tcg_gen_andi_i32(cpu_sr_m, cpu_sr_m, 1);
    tcg_gen_shri_i32(cpu_sr_t, src, SR_T);
    tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
}
207 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
209 TranslationBlock *tb;
210 tb = ctx->tb;
212 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
213 !ctx->singlestep_enabled) {
214 /* Use a direct jump if in same page and singlestep not enabled */
215 tcg_gen_goto_tb(n);
216 tcg_gen_movi_i32(cpu_pc, dest);
217 tcg_gen_exit_tb((uintptr_t)tb + n);
218 } else {
219 tcg_gen_movi_i32(cpu_pc, dest);
220 if (ctx->singlestep_enabled)
221 gen_helper_debug(cpu_env);
222 tcg_gen_exit_tb(0);
226 static void gen_jump(DisasContext * ctx)
228 if (ctx->delayed_pc == (uint32_t) - 1) {
229 /* Target is not statically known, it comes necessarily from a
230 delayed jump as immediate jump are conditinal jumps */
231 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
232 if (ctx->singlestep_enabled)
233 gen_helper_debug(cpu_env);
234 tcg_gen_exit_tb(0);
235 } else {
236 gen_goto_tb(ctx, 0, ctx->delayed_pc);
240 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
242 TCGLabel *label = gen_new_label();
243 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
244 tcg_gen_brcondi_i32(t ? TCG_COND_EQ : TCG_COND_NE, cpu_sr_t, 0, label);
245 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
246 gen_set_label(label);
249 /* Immediate conditional jump (bt or bf) */
250 static void gen_conditional_jump(DisasContext * ctx,
251 target_ulong ift, target_ulong ifnott)
253 TCGLabel *l1 = gen_new_label();
254 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1);
255 gen_goto_tb(ctx, 0, ifnott);
256 gen_set_label(l1);
257 gen_goto_tb(ctx, 1, ift);
/* Delayed conditional jump (bt/s or bf/s).  The taken/not-taken
   decision was latched into DELAY_SLOT_TRUE by gen_branch_slot() when
   the branch insn itself was decoded; here, after the delay slot, we
   either fall through to the next insn or take the recorded jump. */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1;
    TCGv ds;

    l1 = gen_new_label();
    ds = tcg_temp_new();
    tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    /* Not taken: continue with the insn after the delay slot. */
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    gen_set_label(l1);
    /* Taken: clear the latch and jump via ctx/cpu_delayed_pc. */
    tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
    gen_jump(ctx);
}
/* Replace the persistent translator flags with FLAGS, preserving only
   the DELAY_SLOT_TRUE latch managed by gen_branch_slot(). */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
/* Load the double-precision pair FPR[reg]:FPR[reg+1] into the 64-bit
   temp T; the even (lower-numbered) register supplies the high word. */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
/* Store the 64-bit value T into the register pair FPR[reg]:FPR[reg+1]
   (high word to the even register, mirroring gen_load_fpr64).
   NOTE: clobbers T by shifting it in place; callers free T right
   after, so no live value is lost. */
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);   /* low word -> odd reg */
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_extrl_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);       /* high word -> even reg */
    tcg_temp_free_i32(tmp);
}
/* Opcode field extraction: BH_L selects bits L..H of ctx->opcode;
   the 's' suffix marks sign-extended variants. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit branch displacement, sign-extended by hand. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General register access honouring banking: R0-R7 resolve to the
   BANK1 copies (indexes 16-23) only in privileged mode with SR.RB set. */
#define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
                && (ctx->flags & (1u << SR_RB))\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* The opposite bank from REG(x), for the Rm_BANK ldc/stc forms. */
#define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
                   || !(ctx->flags & (1u << SR_RB)))\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection honouring the FPSCR.FR bank-swap bit.
   XHACK remaps the XD pair numbering onto the linear cpu_fregs index. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise a slot-illegal-instruction exception and abort decoding when
   the current insn sits in a delay slot (where it is forbidden). */
#define CHECK_NOT_DELAY_SLOT \
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {      \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                         \
        gen_helper_raise_slot_illegal_instruction(cpu_env);        \
        ctx->bstate = BS_BRANCH;                                   \
        return;                                                    \
    }

/* Raise illegal-instruction (or the delay-slot variant) and abort
   decoding when not in privileged mode. */
#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                                            \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                         \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {  \
            gen_helper_raise_slot_illegal_instruction(cpu_env);    \
        } else {                                                   \
            gen_helper_raise_illegal_instruction(cpu_env);         \
        }                                                          \
        ctx->bstate = BS_BRANCH;                                   \
        return;                                                    \
    }

/* Raise an FPU-disable exception (or the delay-slot variant) and
   abort decoding when SR.FD is set. */
#define CHECK_FPU_ENABLED \
    if (ctx->flags & (1u << SR_FD)) {                              \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                         \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {  \
            gen_helper_raise_slot_fpu_disable(cpu_env);            \
        } else {                                                   \
            gen_helper_raise_fpu_disable(cpu_env);                 \
        }                                                          \
        ctx->bstate = BS_BRANCH;                                   \
        return;                                                    \
    }
354 static void _decode_opc(DisasContext * ctx)
356 /* This code tries to make movcal emulation sufficiently
357 accurate for Linux purposes. This instruction writes
358 memory, and prior to that, always allocates a cache line.
359 It is used in two contexts:
360 - in memcpy, where data is copied in blocks, the first write
361 of to a block uses movca.l for performance.
362 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
363 to flush the cache. Here, the data written by movcal.l is never
364 written to memory, and the data written is just bogus.
366 To simulate this, we simulate movcal.l, we store the value to memory,
367 but we also remember the previous content. If we see ocbi, we check
368 if movcal.l for that address was done previously. If so, the write should
369 not have hit the memory, so we restore the previous content.
370 When we see an instruction that is neither movca.l
371 nor ocbi, the previous content is discarded.
373 To optimize, we only try to flush stores when we're at the start of
374 TB, or if we already saw movca.l in this TB and did not flush stores
375 yet. */
376 if (ctx->has_movcal)
378 int opcode = ctx->opcode & 0xf0ff;
379 if (opcode != 0x0093 /* ocbi */
380 && opcode != 0x00c3 /* movca.l */)
382 gen_helper_discard_movcal_backup(cpu_env);
383 ctx->has_movcal = 0;
387 #if 0
388 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
389 #endif
391 switch (ctx->opcode) {
392 case 0x0019: /* div0u */
393 tcg_gen_movi_i32(cpu_sr_m, 0);
394 tcg_gen_movi_i32(cpu_sr_q, 0);
395 tcg_gen_movi_i32(cpu_sr_t, 0);
396 return;
397 case 0x000b: /* rts */
398 CHECK_NOT_DELAY_SLOT
399 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
400 ctx->flags |= DELAY_SLOT;
401 ctx->delayed_pc = (uint32_t) - 1;
402 return;
403 case 0x0028: /* clrmac */
404 tcg_gen_movi_i32(cpu_mach, 0);
405 tcg_gen_movi_i32(cpu_macl, 0);
406 return;
407 case 0x0048: /* clrs */
408 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
409 return;
410 case 0x0008: /* clrt */
411 tcg_gen_movi_i32(cpu_sr_t, 0);
412 return;
413 case 0x0038: /* ldtlb */
414 CHECK_PRIVILEGED
415 gen_helper_ldtlb(cpu_env);
416 return;
417 case 0x002b: /* rte */
418 CHECK_PRIVILEGED
419 CHECK_NOT_DELAY_SLOT
420 gen_write_sr(cpu_ssr);
421 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
422 ctx->flags |= DELAY_SLOT;
423 ctx->delayed_pc = (uint32_t) - 1;
424 return;
425 case 0x0058: /* sets */
426 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
427 return;
428 case 0x0018: /* sett */
429 tcg_gen_movi_i32(cpu_sr_t, 1);
430 return;
431 case 0xfbfd: /* frchg */
432 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
433 ctx->bstate = BS_STOP;
434 return;
435 case 0xf3fd: /* fschg */
436 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
437 ctx->bstate = BS_STOP;
438 return;
439 case 0x0009: /* nop */
440 return;
441 case 0x001b: /* sleep */
442 CHECK_PRIVILEGED
443 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
444 gen_helper_sleep(cpu_env);
445 return;
448 switch (ctx->opcode & 0xf000) {
449 case 0x1000: /* mov.l Rm,@(disp,Rn) */
451 TCGv addr = tcg_temp_new();
452 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
453 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
454 tcg_temp_free(addr);
456 return;
457 case 0x5000: /* mov.l @(disp,Rm),Rn */
459 TCGv addr = tcg_temp_new();
460 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
461 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
462 tcg_temp_free(addr);
464 return;
465 case 0xe000: /* mov #imm,Rn */
466 tcg_gen_movi_i32(REG(B11_8), B7_0s);
467 return;
468 case 0x9000: /* mov.w @(disp,PC),Rn */
470 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
471 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
472 tcg_temp_free(addr);
474 return;
475 case 0xd000: /* mov.l @(disp,PC),Rn */
477 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
478 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
479 tcg_temp_free(addr);
481 return;
482 case 0x7000: /* add #imm,Rn */
483 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
484 return;
485 case 0xa000: /* bra disp */
486 CHECK_NOT_DELAY_SLOT
487 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
488 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
489 ctx->flags |= DELAY_SLOT;
490 return;
491 case 0xb000: /* bsr disp */
492 CHECK_NOT_DELAY_SLOT
493 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
494 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
495 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
496 ctx->flags |= DELAY_SLOT;
497 return;
500 switch (ctx->opcode & 0xf00f) {
501 case 0x6003: /* mov Rm,Rn */
502 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
503 return;
504 case 0x2000: /* mov.b Rm,@Rn */
505 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
506 return;
507 case 0x2001: /* mov.w Rm,@Rn */
508 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
509 return;
510 case 0x2002: /* mov.l Rm,@Rn */
511 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
512 return;
513 case 0x6000: /* mov.b @Rm,Rn */
514 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
515 return;
516 case 0x6001: /* mov.w @Rm,Rn */
517 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
518 return;
519 case 0x6002: /* mov.l @Rm,Rn */
520 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
521 return;
522 case 0x2004: /* mov.b Rm,@-Rn */
524 TCGv addr = tcg_temp_new();
525 tcg_gen_subi_i32(addr, REG(B11_8), 1);
526 /* might cause re-execution */
527 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
528 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
529 tcg_temp_free(addr);
531 return;
532 case 0x2005: /* mov.w Rm,@-Rn */
534 TCGv addr = tcg_temp_new();
535 tcg_gen_subi_i32(addr, REG(B11_8), 2);
536 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
537 tcg_gen_mov_i32(REG(B11_8), addr);
538 tcg_temp_free(addr);
540 return;
541 case 0x2006: /* mov.l Rm,@-Rn */
543 TCGv addr = tcg_temp_new();
544 tcg_gen_subi_i32(addr, REG(B11_8), 4);
545 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
546 tcg_gen_mov_i32(REG(B11_8), addr);
548 return;
549 case 0x6004: /* mov.b @Rm+,Rn */
550 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
551 if ( B11_8 != B7_4 )
552 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
553 return;
554 case 0x6005: /* mov.w @Rm+,Rn */
555 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
556 if ( B11_8 != B7_4 )
557 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
558 return;
559 case 0x6006: /* mov.l @Rm+,Rn */
560 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
561 if ( B11_8 != B7_4 )
562 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
563 return;
564 case 0x0004: /* mov.b Rm,@(R0,Rn) */
566 TCGv addr = tcg_temp_new();
567 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
568 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
569 tcg_temp_free(addr);
571 return;
572 case 0x0005: /* mov.w Rm,@(R0,Rn) */
574 TCGv addr = tcg_temp_new();
575 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
576 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
577 tcg_temp_free(addr);
579 return;
580 case 0x0006: /* mov.l Rm,@(R0,Rn) */
582 TCGv addr = tcg_temp_new();
583 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
584 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
585 tcg_temp_free(addr);
587 return;
588 case 0x000c: /* mov.b @(R0,Rm),Rn */
590 TCGv addr = tcg_temp_new();
591 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
592 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
593 tcg_temp_free(addr);
595 return;
596 case 0x000d: /* mov.w @(R0,Rm),Rn */
598 TCGv addr = tcg_temp_new();
599 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
600 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
601 tcg_temp_free(addr);
603 return;
604 case 0x000e: /* mov.l @(R0,Rm),Rn */
606 TCGv addr = tcg_temp_new();
607 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
608 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
609 tcg_temp_free(addr);
611 return;
612 case 0x6008: /* swap.b Rm,Rn */
614 TCGv low = tcg_temp_new();;
615 tcg_gen_ext16u_i32(low, REG(B7_4));
616 tcg_gen_bswap16_i32(low, low);
617 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
618 tcg_temp_free(low);
620 return;
621 case 0x6009: /* swap.w Rm,Rn */
622 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
623 return;
624 case 0x200d: /* xtrct Rm,Rn */
626 TCGv high, low;
627 high = tcg_temp_new();
628 tcg_gen_shli_i32(high, REG(B7_4), 16);
629 low = tcg_temp_new();
630 tcg_gen_shri_i32(low, REG(B11_8), 16);
631 tcg_gen_or_i32(REG(B11_8), high, low);
632 tcg_temp_free(low);
633 tcg_temp_free(high);
635 return;
636 case 0x300c: /* add Rm,Rn */
637 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
638 return;
639 case 0x300e: /* addc Rm,Rn */
641 TCGv t0, t1;
642 t0 = tcg_const_tl(0);
643 t1 = tcg_temp_new();
644 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
645 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
646 REG(B11_8), t0, t1, cpu_sr_t);
647 tcg_temp_free(t0);
648 tcg_temp_free(t1);
650 return;
651 case 0x300f: /* addv Rm,Rn */
653 TCGv t0, t1, t2;
654 t0 = tcg_temp_new();
655 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
656 t1 = tcg_temp_new();
657 tcg_gen_xor_i32(t1, t0, REG(B11_8));
658 t2 = tcg_temp_new();
659 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
660 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
661 tcg_temp_free(t2);
662 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
663 tcg_temp_free(t1);
664 tcg_gen_mov_i32(REG(B7_4), t0);
665 tcg_temp_free(t0);
667 return;
668 case 0x2009: /* and Rm,Rn */
669 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
670 return;
671 case 0x3000: /* cmp/eq Rm,Rn */
672 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
673 return;
674 case 0x3003: /* cmp/ge Rm,Rn */
675 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
676 return;
677 case 0x3007: /* cmp/gt Rm,Rn */
678 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
679 return;
680 case 0x3006: /* cmp/hi Rm,Rn */
681 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
682 return;
683 case 0x3002: /* cmp/hs Rm,Rn */
684 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
685 return;
686 case 0x200c: /* cmp/str Rm,Rn */
688 TCGv cmp1 = tcg_temp_new();
689 TCGv cmp2 = tcg_temp_new();
690 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
691 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
692 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
693 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
694 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
695 tcg_temp_free(cmp2);
696 tcg_temp_free(cmp1);
698 return;
699 case 0x2007: /* div0s Rm,Rn */
700 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
701 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
702 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
703 return;
704 case 0x3004: /* div1 Rm,Rn */
706 TCGv t0 = tcg_temp_new();
707 TCGv t1 = tcg_temp_new();
708 TCGv t2 = tcg_temp_new();
709 TCGv zero = tcg_const_i32(0);
711 /* shift left arg1, saving the bit being pushed out and inserting
712 T on the right */
713 tcg_gen_shri_i32(t0, REG(B11_8), 31);
714 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
715 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
717 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
718 using 64-bit temps, we compute arg0's high part from q ^ m, so
719 that it is 0x00000000 when adding the value or 0xffffffff when
720 subtracting it. */
721 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
722 tcg_gen_subi_i32(t1, t1, 1);
723 tcg_gen_neg_i32(t2, REG(B7_4));
724 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
725 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
727 /* compute T and Q depending on carry */
728 tcg_gen_andi_i32(t1, t1, 1);
729 tcg_gen_xor_i32(t1, t1, t0);
730 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
731 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
733 tcg_temp_free(zero);
734 tcg_temp_free(t2);
735 tcg_temp_free(t1);
736 tcg_temp_free(t0);
738 return;
739 case 0x300d: /* dmuls.l Rm,Rn */
740 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
741 return;
742 case 0x3005: /* dmulu.l Rm,Rn */
743 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
744 return;
745 case 0x600e: /* exts.b Rm,Rn */
746 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
747 return;
748 case 0x600f: /* exts.w Rm,Rn */
749 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
750 return;
751 case 0x600c: /* extu.b Rm,Rn */
752 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
753 return;
754 case 0x600d: /* extu.w Rm,Rn */
755 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
756 return;
757 case 0x000f: /* mac.l @Rm+,@Rn+ */
759 TCGv arg0, arg1;
760 arg0 = tcg_temp_new();
761 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
762 arg1 = tcg_temp_new();
763 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
764 gen_helper_macl(cpu_env, arg0, arg1);
765 tcg_temp_free(arg1);
766 tcg_temp_free(arg0);
767 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
768 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
770 return;
771 case 0x400f: /* mac.w @Rm+,@Rn+ */
773 TCGv arg0, arg1;
774 arg0 = tcg_temp_new();
775 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
776 arg1 = tcg_temp_new();
777 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
778 gen_helper_macw(cpu_env, arg0, arg1);
779 tcg_temp_free(arg1);
780 tcg_temp_free(arg0);
781 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
782 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
784 return;
785 case 0x0007: /* mul.l Rm,Rn */
786 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
787 return;
788 case 0x200f: /* muls.w Rm,Rn */
790 TCGv arg0, arg1;
791 arg0 = tcg_temp_new();
792 tcg_gen_ext16s_i32(arg0, REG(B7_4));
793 arg1 = tcg_temp_new();
794 tcg_gen_ext16s_i32(arg1, REG(B11_8));
795 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
796 tcg_temp_free(arg1);
797 tcg_temp_free(arg0);
799 return;
800 case 0x200e: /* mulu.w Rm,Rn */
802 TCGv arg0, arg1;
803 arg0 = tcg_temp_new();
804 tcg_gen_ext16u_i32(arg0, REG(B7_4));
805 arg1 = tcg_temp_new();
806 tcg_gen_ext16u_i32(arg1, REG(B11_8));
807 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
808 tcg_temp_free(arg1);
809 tcg_temp_free(arg0);
811 return;
812 case 0x600b: /* neg Rm,Rn */
813 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
814 return;
815 case 0x600a: /* negc Rm,Rn */
817 TCGv t0 = tcg_const_i32(0);
818 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
819 REG(B7_4), t0, cpu_sr_t, t0);
820 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
821 t0, t0, REG(B11_8), cpu_sr_t);
822 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
823 tcg_temp_free(t0);
825 return;
826 case 0x6007: /* not Rm,Rn */
827 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
828 return;
829 case 0x200b: /* or Rm,Rn */
830 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
831 return;
832 case 0x400c: /* shad Rm,Rn */
834 TCGv t0 = tcg_temp_new();
835 TCGv t1 = tcg_temp_new();
836 TCGv t2 = tcg_temp_new();
838 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
840 /* positive case: shift to the left */
841 tcg_gen_shl_i32(t1, REG(B11_8), t0);
843 /* negative case: shift to the right in two steps to
844 correctly handle the -32 case */
845 tcg_gen_xori_i32(t0, t0, 0x1f);
846 tcg_gen_sar_i32(t2, REG(B11_8), t0);
847 tcg_gen_sari_i32(t2, t2, 1);
849 /* select between the two cases */
850 tcg_gen_movi_i32(t0, 0);
851 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
853 tcg_temp_free(t0);
854 tcg_temp_free(t1);
855 tcg_temp_free(t2);
857 return;
858 case 0x400d: /* shld Rm,Rn */
860 TCGv t0 = tcg_temp_new();
861 TCGv t1 = tcg_temp_new();
862 TCGv t2 = tcg_temp_new();
864 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
866 /* positive case: shift to the left */
867 tcg_gen_shl_i32(t1, REG(B11_8), t0);
869 /* negative case: shift to the right in two steps to
870 correctly handle the -32 case */
871 tcg_gen_xori_i32(t0, t0, 0x1f);
872 tcg_gen_shr_i32(t2, REG(B11_8), t0);
873 tcg_gen_shri_i32(t2, t2, 1);
875 /* select between the two cases */
876 tcg_gen_movi_i32(t0, 0);
877 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
879 tcg_temp_free(t0);
880 tcg_temp_free(t1);
881 tcg_temp_free(t2);
883 return;
884 case 0x3008: /* sub Rm,Rn */
885 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
886 return;
887 case 0x300a: /* subc Rm,Rn */
889 TCGv t0, t1;
890 t0 = tcg_const_tl(0);
891 t1 = tcg_temp_new();
892 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
893 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
894 REG(B11_8), t0, t1, cpu_sr_t);
895 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
896 tcg_temp_free(t0);
897 tcg_temp_free(t1);
899 return;
900 case 0x300b: /* subv Rm,Rn */
902 TCGv t0, t1, t2;
903 t0 = tcg_temp_new();
904 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
905 t1 = tcg_temp_new();
906 tcg_gen_xor_i32(t1, t0, REG(B7_4));
907 t2 = tcg_temp_new();
908 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
909 tcg_gen_and_i32(t1, t1, t2);
910 tcg_temp_free(t2);
911 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
912 tcg_temp_free(t1);
913 tcg_gen_mov_i32(REG(B11_8), t0);
914 tcg_temp_free(t0);
916 return;
917 case 0x2008: /* tst Rm,Rn */
919 TCGv val = tcg_temp_new();
920 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
921 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
922 tcg_temp_free(val);
924 return;
925 case 0x200a: /* xor Rm,Rn */
926 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
927 return;
928 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
929 CHECK_FPU_ENABLED
930 if (ctx->flags & FPSCR_SZ) {
931 TCGv_i64 fp = tcg_temp_new_i64();
932 gen_load_fpr64(fp, XREG(B7_4));
933 gen_store_fpr64(fp, XREG(B11_8));
934 tcg_temp_free_i64(fp);
935 } else {
936 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
938 return;
939 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
940 CHECK_FPU_ENABLED
941 if (ctx->flags & FPSCR_SZ) {
942 TCGv addr_hi = tcg_temp_new();
943 int fr = XREG(B7_4);
944 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
945 tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
946 ctx->memidx, MO_TEUL);
947 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
948 ctx->memidx, MO_TEUL);
949 tcg_temp_free(addr_hi);
950 } else {
951 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
952 ctx->memidx, MO_TEUL);
954 return;
955 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
956 CHECK_FPU_ENABLED
957 if (ctx->flags & FPSCR_SZ) {
958 TCGv addr_hi = tcg_temp_new();
959 int fr = XREG(B11_8);
960 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
961 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
962 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
963 tcg_temp_free(addr_hi);
964 } else {
965 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
966 ctx->memidx, MO_TEUL);
968 return;
969 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
970 CHECK_FPU_ENABLED
971 if (ctx->flags & FPSCR_SZ) {
972 TCGv addr_hi = tcg_temp_new();
973 int fr = XREG(B11_8);
974 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
975 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
976 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
977 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
978 tcg_temp_free(addr_hi);
979 } else {
980 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
981 ctx->memidx, MO_TEUL);
982 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
984 return;
985 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
986 CHECK_FPU_ENABLED
987 TCGv addr = tcg_temp_new_i32();
988 tcg_gen_subi_i32(addr, REG(B11_8), 4);
989 if (ctx->flags & FPSCR_SZ) {
990 int fr = XREG(B7_4);
991 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
992 tcg_gen_subi_i32(addr, addr, 4);
993 tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
994 } else {
995 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
996 ctx->memidx, MO_TEUL);
998 tcg_gen_mov_i32(REG(B11_8), addr);
999 tcg_temp_free(addr);
1000 return;
1001 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1002 CHECK_FPU_ENABLED
1004 TCGv addr = tcg_temp_new_i32();
1005 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1006 if (ctx->flags & FPSCR_SZ) {
1007 int fr = XREG(B11_8);
1008 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1009 ctx->memidx, MO_TEUL);
1010 tcg_gen_addi_i32(addr, addr, 4);
1011 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1012 ctx->memidx, MO_TEUL);
1013 } else {
1014 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
1015 ctx->memidx, MO_TEUL);
1017 tcg_temp_free(addr);
1019 return;
1020 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1021 CHECK_FPU_ENABLED
1023 TCGv addr = tcg_temp_new();
1024 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1025 if (ctx->flags & FPSCR_SZ) {
1026 int fr = XREG(B7_4);
1027 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1028 ctx->memidx, MO_TEUL);
1029 tcg_gen_addi_i32(addr, addr, 4);
1030 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1031 ctx->memidx, MO_TEUL);
1032 } else {
1033 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1034 ctx->memidx, MO_TEUL);
1036 tcg_temp_free(addr);
1038 return;
1039 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1040 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1041 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1042 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1043 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1044 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1046 CHECK_FPU_ENABLED
1047 if (ctx->flags & FPSCR_PR) {
1048 TCGv_i64 fp0, fp1;
1050 if (ctx->opcode & 0x0110)
1051 break; /* illegal instruction */
1052 fp0 = tcg_temp_new_i64();
1053 fp1 = tcg_temp_new_i64();
1054 gen_load_fpr64(fp0, DREG(B11_8));
1055 gen_load_fpr64(fp1, DREG(B7_4));
1056 switch (ctx->opcode & 0xf00f) {
1057 case 0xf000: /* fadd Rm,Rn */
1058 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1059 break;
1060 case 0xf001: /* fsub Rm,Rn */
1061 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1062 break;
1063 case 0xf002: /* fmul Rm,Rn */
1064 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1065 break;
1066 case 0xf003: /* fdiv Rm,Rn */
1067 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1068 break;
1069 case 0xf004: /* fcmp/eq Rm,Rn */
1070 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1071 return;
1072 case 0xf005: /* fcmp/gt Rm,Rn */
1073 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1074 return;
1076 gen_store_fpr64(fp0, DREG(B11_8));
1077 tcg_temp_free_i64(fp0);
1078 tcg_temp_free_i64(fp1);
1079 } else {
1080 switch (ctx->opcode & 0xf00f) {
1081 case 0xf000: /* fadd Rm,Rn */
1082 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1083 cpu_fregs[FREG(B11_8)],
1084 cpu_fregs[FREG(B7_4)]);
1085 break;
1086 case 0xf001: /* fsub Rm,Rn */
1087 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1088 cpu_fregs[FREG(B11_8)],
1089 cpu_fregs[FREG(B7_4)]);
1090 break;
1091 case 0xf002: /* fmul Rm,Rn */
1092 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1093 cpu_fregs[FREG(B11_8)],
1094 cpu_fregs[FREG(B7_4)]);
1095 break;
1096 case 0xf003: /* fdiv Rm,Rn */
1097 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1098 cpu_fregs[FREG(B11_8)],
1099 cpu_fregs[FREG(B7_4)]);
1100 break;
1101 case 0xf004: /* fcmp/eq Rm,Rn */
1102 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1103 cpu_fregs[FREG(B7_4)]);
1104 return;
1105 case 0xf005: /* fcmp/gt Rm,Rn */
1106 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1107 cpu_fregs[FREG(B7_4)]);
1108 return;
1112 return;
1113 case 0xf00e: /* fmac FR0,RM,Rn */
1115 CHECK_FPU_ENABLED
1116 if (ctx->flags & FPSCR_PR) {
1117 break; /* illegal instruction */
1118 } else {
1119 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1120 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1121 cpu_fregs[FREG(B11_8)]);
1122 return;
1127 switch (ctx->opcode & 0xff00) {
1128 case 0xc900: /* and #imm,R0 */
1129 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1130 return;
1131 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1133 TCGv addr, val;
1134 addr = tcg_temp_new();
1135 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1136 val = tcg_temp_new();
1137 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1138 tcg_gen_andi_i32(val, val, B7_0);
1139 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1140 tcg_temp_free(val);
1141 tcg_temp_free(addr);
1143 return;
1144 case 0x8b00: /* bf label */
1145 CHECK_NOT_DELAY_SLOT
1146 gen_conditional_jump(ctx, ctx->pc + 2,
1147 ctx->pc + 4 + B7_0s * 2);
1148 ctx->bstate = BS_BRANCH;
1149 return;
1150 case 0x8f00: /* bf/s label */
1151 CHECK_NOT_DELAY_SLOT
1152 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1153 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1154 return;
1155 case 0x8900: /* bt label */
1156 CHECK_NOT_DELAY_SLOT
1157 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1158 ctx->pc + 2);
1159 ctx->bstate = BS_BRANCH;
1160 return;
1161 case 0x8d00: /* bt/s label */
1162 CHECK_NOT_DELAY_SLOT
1163 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1164 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1165 return;
1166 case 0x8800: /* cmp/eq #imm,R0 */
1167 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1168 return;
1169 case 0xc400: /* mov.b @(disp,GBR),R0 */
1171 TCGv addr = tcg_temp_new();
1172 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1173 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1174 tcg_temp_free(addr);
1176 return;
1177 case 0xc500: /* mov.w @(disp,GBR),R0 */
1179 TCGv addr = tcg_temp_new();
1180 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1181 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1182 tcg_temp_free(addr);
1184 return;
1185 case 0xc600: /* mov.l @(disp,GBR),R0 */
1187 TCGv addr = tcg_temp_new();
1188 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1189 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1190 tcg_temp_free(addr);
1192 return;
1193 case 0xc000: /* mov.b R0,@(disp,GBR) */
1195 TCGv addr = tcg_temp_new();
1196 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1197 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1198 tcg_temp_free(addr);
1200 return;
1201 case 0xc100: /* mov.w R0,@(disp,GBR) */
1203 TCGv addr = tcg_temp_new();
1204 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1205 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1206 tcg_temp_free(addr);
1208 return;
1209 case 0xc200: /* mov.l R0,@(disp,GBR) */
1211 TCGv addr = tcg_temp_new();
1212 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1213 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1214 tcg_temp_free(addr);
1216 return;
1217 case 0x8000: /* mov.b R0,@(disp,Rn) */
1219 TCGv addr = tcg_temp_new();
1220 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1221 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1222 tcg_temp_free(addr);
1224 return;
1225 case 0x8100: /* mov.w R0,@(disp,Rn) */
1227 TCGv addr = tcg_temp_new();
1228 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1229 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1230 tcg_temp_free(addr);
1232 return;
1233 case 0x8400: /* mov.b @(disp,Rn),R0 */
1235 TCGv addr = tcg_temp_new();
1236 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1237 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1238 tcg_temp_free(addr);
1240 return;
1241 case 0x8500: /* mov.w @(disp,Rn),R0 */
1243 TCGv addr = tcg_temp_new();
1244 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1245 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1246 tcg_temp_free(addr);
1248 return;
1249 case 0xc700: /* mova @(disp,PC),R0 */
1250 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1251 return;
1252 case 0xcb00: /* or #imm,R0 */
1253 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1254 return;
1255 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1257 TCGv addr, val;
1258 addr = tcg_temp_new();
1259 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1260 val = tcg_temp_new();
1261 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1262 tcg_gen_ori_i32(val, val, B7_0);
1263 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1264 tcg_temp_free(val);
1265 tcg_temp_free(addr);
1267 return;
1268 case 0xc300: /* trapa #imm */
1270 TCGv imm;
1271 CHECK_NOT_DELAY_SLOT
1272 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1273 imm = tcg_const_i32(B7_0);
1274 gen_helper_trapa(cpu_env, imm);
1275 tcg_temp_free(imm);
1276 ctx->bstate = BS_BRANCH;
1278 return;
1279 case 0xc800: /* tst #imm,R0 */
1281 TCGv val = tcg_temp_new();
1282 tcg_gen_andi_i32(val, REG(0), B7_0);
1283 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1284 tcg_temp_free(val);
1286 return;
1287 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1289 TCGv val = tcg_temp_new();
1290 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1291 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1292 tcg_gen_andi_i32(val, val, B7_0);
1293 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1294 tcg_temp_free(val);
1296 return;
1297 case 0xca00: /* xor #imm,R0 */
1298 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1299 return;
1300 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1302 TCGv addr, val;
1303 addr = tcg_temp_new();
1304 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1305 val = tcg_temp_new();
1306 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1307 tcg_gen_xori_i32(val, val, B7_0);
1308 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1309 tcg_temp_free(val);
1310 tcg_temp_free(addr);
1312 return;
1315 switch (ctx->opcode & 0xf08f) {
1316 case 0x408e: /* ldc Rm,Rn_BANK */
1317 CHECK_PRIVILEGED
1318 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1319 return;
1320 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1321 CHECK_PRIVILEGED
1322 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1323 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1324 return;
1325 case 0x0082: /* stc Rm_BANK,Rn */
1326 CHECK_PRIVILEGED
1327 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1328 return;
1329 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1330 CHECK_PRIVILEGED
1332 TCGv addr = tcg_temp_new();
1333 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1334 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1335 tcg_gen_mov_i32(REG(B11_8), addr);
1336 tcg_temp_free(addr);
1338 return;
1341 switch (ctx->opcode & 0xf0ff) {
1342 case 0x0023: /* braf Rn */
1343 CHECK_NOT_DELAY_SLOT
1344 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1345 ctx->flags |= DELAY_SLOT;
1346 ctx->delayed_pc = (uint32_t) - 1;
1347 return;
1348 case 0x0003: /* bsrf Rn */
1349 CHECK_NOT_DELAY_SLOT
1350 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1351 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1352 ctx->flags |= DELAY_SLOT;
1353 ctx->delayed_pc = (uint32_t) - 1;
1354 return;
1355 case 0x4015: /* cmp/pl Rn */
1356 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1357 return;
1358 case 0x4011: /* cmp/pz Rn */
1359 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1360 return;
1361 case 0x4010: /* dt Rn */
1362 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1363 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1364 return;
1365 case 0x402b: /* jmp @Rn */
1366 CHECK_NOT_DELAY_SLOT
1367 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1368 ctx->flags |= DELAY_SLOT;
1369 ctx->delayed_pc = (uint32_t) - 1;
1370 return;
1371 case 0x400b: /* jsr @Rn */
1372 CHECK_NOT_DELAY_SLOT
1373 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1374 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1375 ctx->flags |= DELAY_SLOT;
1376 ctx->delayed_pc = (uint32_t) - 1;
1377 return;
1378 case 0x400e: /* ldc Rm,SR */
1379 CHECK_PRIVILEGED
1381 TCGv val = tcg_temp_new();
1382 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1383 gen_write_sr(val);
1384 tcg_temp_free(val);
1385 ctx->bstate = BS_STOP;
1387 return;
1388 case 0x4007: /* ldc.l @Rm+,SR */
1389 CHECK_PRIVILEGED
1391 TCGv val = tcg_temp_new();
1392 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1393 tcg_gen_andi_i32(val, val, 0x700083f3);
1394 gen_write_sr(val);
1395 tcg_temp_free(val);
1396 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1397 ctx->bstate = BS_STOP;
1399 return;
1400 case 0x0002: /* stc SR,Rn */
1401 CHECK_PRIVILEGED
1402 gen_read_sr(REG(B11_8));
1403 return;
1404 case 0x4003: /* stc SR,@-Rn */
1405 CHECK_PRIVILEGED
1407 TCGv addr = tcg_temp_new();
1408 TCGv val = tcg_temp_new();
1409 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1410 gen_read_sr(val);
1411 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1412 tcg_gen_mov_i32(REG(B11_8), addr);
1413 tcg_temp_free(val);
1414 tcg_temp_free(addr);
1416 return;
1417 #define LD(reg,ldnum,ldpnum,prechk) \
1418 case ldnum: \
1419 prechk \
1420 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1421 return; \
1422 case ldpnum: \
1423 prechk \
1424 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1425 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1426 return;
1427 #define ST(reg,stnum,stpnum,prechk) \
1428 case stnum: \
1429 prechk \
1430 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1431 return; \
1432 case stpnum: \
1433 prechk \
1435 TCGv addr = tcg_temp_new(); \
1436 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1437 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1438 tcg_gen_mov_i32(REG(B11_8), addr); \
1439 tcg_temp_free(addr); \
1441 return;
1442 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1443 LD(reg,ldnum,ldpnum,prechk) \
1444 ST(reg,stnum,stpnum,prechk)
1445 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1446 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1447 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1448 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1449 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1450 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1451 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1452 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1453 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1454 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1455 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1456 case 0x406a: /* lds Rm,FPSCR */
1457 CHECK_FPU_ENABLED
1458 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1459 ctx->bstate = BS_STOP;
1460 return;
1461 case 0x4066: /* lds.l @Rm+,FPSCR */
1462 CHECK_FPU_ENABLED
1464 TCGv addr = tcg_temp_new();
1465 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1466 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1467 gen_helper_ld_fpscr(cpu_env, addr);
1468 tcg_temp_free(addr);
1469 ctx->bstate = BS_STOP;
1471 return;
1472 case 0x006a: /* sts FPSCR,Rn */
1473 CHECK_FPU_ENABLED
1474 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1475 return;
1476 case 0x4062: /* sts FPSCR,@-Rn */
1477 CHECK_FPU_ENABLED
1479 TCGv addr, val;
1480 val = tcg_temp_new();
1481 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1482 addr = tcg_temp_new();
1483 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1484 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1485 tcg_gen_mov_i32(REG(B11_8), addr);
1486 tcg_temp_free(addr);
1487 tcg_temp_free(val);
1489 return;
1490 case 0x00c3: /* movca.l R0,@Rm */
1492 TCGv val = tcg_temp_new();
1493 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1494 gen_helper_movcal(cpu_env, REG(B11_8), val);
1495 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1497 ctx->has_movcal = 1;
1498 return;
1499 case 0x40a9:
1500 /* MOVUA.L @Rm,R0 (Rm) -> R0
1501 Load non-boundary-aligned data */
1502 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1503 return;
1504 case 0x40e9:
1505 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1506 Load non-boundary-aligned data */
1507 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1508 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1509 return;
1510 case 0x0029: /* movt Rn */
1511 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1512 return;
1513 case 0x0073:
1514 /* MOVCO.L
1515 LDST -> T
1516 If (T == 1) R0 -> (Rn)
1517 0 -> LDST
1519 if (ctx->features & SH_FEATURE_SH4A) {
1520 TCGLabel *label = gen_new_label();
1521 tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
1522 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1523 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1524 gen_set_label(label);
1525 tcg_gen_movi_i32(cpu_ldst, 0);
1526 return;
1527 } else
1528 break;
1529 case 0x0063:
1530 /* MOVLI.L @Rm,R0
1531 1 -> LDST
1532 (Rm) -> R0
1533 When interrupt/exception
1534 occurred 0 -> LDST
1536 if (ctx->features & SH_FEATURE_SH4A) {
1537 tcg_gen_movi_i32(cpu_ldst, 0);
1538 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1539 tcg_gen_movi_i32(cpu_ldst, 1);
1540 return;
1541 } else
1542 break;
1543 case 0x0093: /* ocbi @Rn */
1545 gen_helper_ocbi(cpu_env, REG(B11_8));
1547 return;
1548 case 0x00a3: /* ocbp @Rn */
1549 case 0x00b3: /* ocbwb @Rn */
1550 /* These instructions are supposed to do nothing in case of
1551 a cache miss. Given that we only partially emulate caches
1552 it is safe to simply ignore them. */
1553 return;
1554 case 0x0083: /* pref @Rn */
1555 return;
1556 case 0x00d3: /* prefi @Rn */
1557 if (ctx->features & SH_FEATURE_SH4A)
1558 return;
1559 else
1560 break;
1561 case 0x00e3: /* icbi @Rn */
1562 if (ctx->features & SH_FEATURE_SH4A)
1563 return;
1564 else
1565 break;
1566 case 0x00ab: /* synco */
1567 if (ctx->features & SH_FEATURE_SH4A)
1568 return;
1569 else
1570 break;
1571 case 0x4024: /* rotcl Rn */
1573 TCGv tmp = tcg_temp_new();
1574 tcg_gen_mov_i32(tmp, cpu_sr_t);
1575 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1576 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1577 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1578 tcg_temp_free(tmp);
1580 return;
1581 case 0x4025: /* rotcr Rn */
1583 TCGv tmp = tcg_temp_new();
1584 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1585 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1586 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1587 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1588 tcg_temp_free(tmp);
1590 return;
1591 case 0x4004: /* rotl Rn */
1592 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1593 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1594 return;
1595 case 0x4005: /* rotr Rn */
1596 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1597 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1598 return;
1599 case 0x4000: /* shll Rn */
1600 case 0x4020: /* shal Rn */
1601 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1602 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1603 return;
1604 case 0x4021: /* shar Rn */
1605 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1606 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1607 return;
1608 case 0x4001: /* shlr Rn */
1609 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1610 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1611 return;
1612 case 0x4008: /* shll2 Rn */
1613 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1614 return;
1615 case 0x4018: /* shll8 Rn */
1616 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1617 return;
1618 case 0x4028: /* shll16 Rn */
1619 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1620 return;
1621 case 0x4009: /* shlr2 Rn */
1622 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1623 return;
1624 case 0x4019: /* shlr8 Rn */
1625 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1626 return;
1627 case 0x4029: /* shlr16 Rn */
1628 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1629 return;
1630 case 0x401b: /* tas.b @Rn */
1632 TCGv addr, val;
1633 addr = tcg_temp_local_new();
1634 tcg_gen_mov_i32(addr, REG(B11_8));
1635 val = tcg_temp_local_new();
1636 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1637 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1638 tcg_gen_ori_i32(val, val, 0x80);
1639 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1640 tcg_temp_free(val);
1641 tcg_temp_free(addr);
1643 return;
1644 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1645 CHECK_FPU_ENABLED
1646 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1647 return;
1648 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1649 CHECK_FPU_ENABLED
1650 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1651 return;
1652 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1653 CHECK_FPU_ENABLED
1654 if (ctx->flags & FPSCR_PR) {
1655 TCGv_i64 fp;
1656 if (ctx->opcode & 0x0100)
1657 break; /* illegal instruction */
1658 fp = tcg_temp_new_i64();
1659 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1660 gen_store_fpr64(fp, DREG(B11_8));
1661 tcg_temp_free_i64(fp);
1663 else {
1664 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1666 return;
1667 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1668 CHECK_FPU_ENABLED
1669 if (ctx->flags & FPSCR_PR) {
1670 TCGv_i64 fp;
1671 if (ctx->opcode & 0x0100)
1672 break; /* illegal instruction */
1673 fp = tcg_temp_new_i64();
1674 gen_load_fpr64(fp, DREG(B11_8));
1675 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1676 tcg_temp_free_i64(fp);
1678 else {
1679 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1681 return;
1682 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1683 CHECK_FPU_ENABLED
1685 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1687 return;
1688 case 0xf05d: /* fabs FRn/DRn */
1689 CHECK_FPU_ENABLED
1690 if (ctx->flags & FPSCR_PR) {
1691 if (ctx->opcode & 0x0100)
1692 break; /* illegal instruction */
1693 TCGv_i64 fp = tcg_temp_new_i64();
1694 gen_load_fpr64(fp, DREG(B11_8));
1695 gen_helper_fabs_DT(fp, fp);
1696 gen_store_fpr64(fp, DREG(B11_8));
1697 tcg_temp_free_i64(fp);
1698 } else {
1699 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1701 return;
1702 case 0xf06d: /* fsqrt FRn */
1703 CHECK_FPU_ENABLED
1704 if (ctx->flags & FPSCR_PR) {
1705 if (ctx->opcode & 0x0100)
1706 break; /* illegal instruction */
1707 TCGv_i64 fp = tcg_temp_new_i64();
1708 gen_load_fpr64(fp, DREG(B11_8));
1709 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1710 gen_store_fpr64(fp, DREG(B11_8));
1711 tcg_temp_free_i64(fp);
1712 } else {
1713 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1714 cpu_fregs[FREG(B11_8)]);
1716 return;
1717 case 0xf07d: /* fsrra FRn */
1718 CHECK_FPU_ENABLED
1719 break;
1720 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1721 CHECK_FPU_ENABLED
1722 if (!(ctx->flags & FPSCR_PR)) {
1723 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1725 return;
1726 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1727 CHECK_FPU_ENABLED
1728 if (!(ctx->flags & FPSCR_PR)) {
1729 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1731 return;
1732 case 0xf0ad: /* fcnvsd FPUL,DRn */
1733 CHECK_FPU_ENABLED
1735 TCGv_i64 fp = tcg_temp_new_i64();
1736 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1737 gen_store_fpr64(fp, DREG(B11_8));
1738 tcg_temp_free_i64(fp);
1740 return;
1741 case 0xf0bd: /* fcnvds DRn,FPUL */
1742 CHECK_FPU_ENABLED
1744 TCGv_i64 fp = tcg_temp_new_i64();
1745 gen_load_fpr64(fp, DREG(B11_8));
1746 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1747 tcg_temp_free_i64(fp);
1749 return;
1750 case 0xf0ed: /* fipr FVm,FVn */
1751 CHECK_FPU_ENABLED
1752 if ((ctx->flags & FPSCR_PR) == 0) {
1753 TCGv m, n;
1754 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1755 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1756 gen_helper_fipr(cpu_env, m, n);
1757 tcg_temp_free(m);
1758 tcg_temp_free(n);
1759 return;
1761 break;
1762 case 0xf0fd: /* ftrv XMTRX,FVn */
1763 CHECK_FPU_ENABLED
1764 if ((ctx->opcode & 0x0300) == 0x0100 &&
1765 (ctx->flags & FPSCR_PR) == 0) {
1766 TCGv n;
1767 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1768 gen_helper_ftrv(cpu_env, n);
1769 tcg_temp_free(n);
1770 return;
1772 break;
1774 #if 0
1775 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1776 ctx->opcode, ctx->pc);
1777 fflush(stderr);
1778 #endif
1779 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1780 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1781 gen_helper_raise_slot_illegal_instruction(cpu_env);
1782 } else {
1783 gen_helper_raise_illegal_instruction(cpu_env);
1785 ctx->bstate = BS_BRANCH;
1788 static void decode_opc(DisasContext * ctx)
1790 uint32_t old_flags = ctx->flags;
1792 _decode_opc(ctx);
1794 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1795 if (ctx->flags & DELAY_SLOT_CLEARME) {
1796 gen_store_flags(0);
1797 } else {
1798 /* go out of the delay slot */
1799 uint32_t new_flags = ctx->flags;
1800 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1801 gen_store_flags(new_flags);
1803 ctx->flags = 0;
1804 ctx->bstate = BS_BRANCH;
1805 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1806 gen_delayed_conditional_jump(ctx);
1807 } else if (old_flags & DELAY_SLOT) {
1808 gen_jump(ctx);
1813 /* go into a delay slot */
1814 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1815 gen_store_flags(ctx->flags);
/* Translate a block of SH-4 guest code starting at tb->pc into TCG ops.
   Translation stops at the first branch/exception (ctx.bstate leaves
   BS_NONE), at a page boundary, when the op buffer fills, on single-step,
   or after max_insns instructions.  Fills in tb->size and tb->icount.  */
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
    SuperHCPU *cpu = sh_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    /* MMU index: 0 when SR.MD (privileged) is set in the TB flags, else 1
       (user mode).  */
    ctx.memidx = (ctx.flags & (1u << SR_MD)) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
        /* Record pc and flags for restore_state_to_opc().  */
        tcg_gen_insn_start(ctx.pc, ctx.flags);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            /* We have hit a breakpoint - make sure PC is up-to-date */
            tcg_gen_movi_i32(cpu_pc, ctx.pc);
            gen_helper_debug(cpu_env);
            ctx.bstate = BS_BRANCH;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            ctx.pc += 2;
            break;
        }

        /* The last insn of an icount-limited TB may do I/O.  */
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        ctx.pc += 2;
        /* Never let a TB cross a page boundary.  */
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (cs->singlestep_enabled) {
            break;
        }
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (cs->singlestep_enabled) {
        /* Single-stepping: end the TB with a debug exception.  */
        tcg_gen_movi_i32(cpu_pc, ctx.pc);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
            /* gen_op_interrupt_restart(); */
            /* fall through */
        case BS_NONE:
            /* Still in a delay slot at TB end: persist the flags so the
               next TB resumes correctly.  */
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            }
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* gen_op_interrupt_restart(); */
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            /* Branch insns have already emitted their own TB exit.  */
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN:\n");	/* , lookup_symbol(pc_start)); */
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
1924 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
1925 target_ulong *data)
1927 env->pc = data[0];
1928 env->flags = data[1];