intel_iommu: add IR translation faults defines
[qemu/ar7.git] / target-sh4 / translate.c
blobca80cf70ca84bf48ff669e13e80d4983626dac1c
1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #define DEBUG_DISAS
22 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
26 #include "tcg-op.h"
27 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
33 #include "exec/log.h"
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    struct TranslationBlock *tb;  /* TB currently being translated */
    target_ulong pc;              /* guest PC of the insn being decoded */
    uint16_t opcode;              /* raw 16-bit SH-4 opcode */
    uint32_t flags;               /* TB flags copy: SR bits (MD/RB/FD), FPSCR
                                     mode bits and DELAY_SLOT* state */
    int bstate;                   /* BS_* state controlling translation stop */
    int memidx;                   /* MMU index for guest loads/stores */
    uint32_t delayed_pc;          /* branch target; (uint32_t)-1 if only known
                                     at run time (held in cpu_delayed_pc) */
    int singlestep_enabled;       /* gdbstub single-step: exit after each insn */
    uint32_t features;            /* CPU feature bits -- presumably SH4A etc.;
                                     not exercised in this chunk, confirm */
    int has_movcal;               /* movca.l backup is live; used to emulate
                                     ocbi cache-line invalidation */
} DisasContext;
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
/* True when translating user-mode code, i.e. SR.MD clear in the TB flags. */
#define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
#endif
/* Values for DisasContext::bstate. */
enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                  * exception condition */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_gregs[24];  /* R0-R15 plus both banked copies of R0-R7 */
/* SR is kept split: cpu_sr holds SR with Q/M/T cleared; the bits live
   as 0/1 values in their own variables (see gen_read_sr/gen_write_sr). */
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];  /* FPR0-FPR15 in both banks */

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;
76 #include "exec/gen-icount.h"
78 void sh4_translate_init(void)
80 int i;
81 static int done_init = 0;
82 static const char * const gregnames[24] = {
83 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
84 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
85 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
86 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
87 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
89 static const char * const fregnames[32] = {
90 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
91 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
92 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
93 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
94 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
95 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
96 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
97 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
100 if (done_init)
101 return;
103 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
104 tcg_ctx.tcg_env = cpu_env;
106 for (i = 0; i < 24; i++)
107 cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
108 offsetof(CPUSH4State, gregs[i]),
109 gregnames[i]);
111 cpu_pc = tcg_global_mem_new_i32(cpu_env,
112 offsetof(CPUSH4State, pc), "PC");
113 cpu_sr = tcg_global_mem_new_i32(cpu_env,
114 offsetof(CPUSH4State, sr), "SR");
115 cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
116 offsetof(CPUSH4State, sr_m), "SR_M");
117 cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
118 offsetof(CPUSH4State, sr_q), "SR_Q");
119 cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
120 offsetof(CPUSH4State, sr_t), "SR_T");
121 cpu_ssr = tcg_global_mem_new_i32(cpu_env,
122 offsetof(CPUSH4State, ssr), "SSR");
123 cpu_spc = tcg_global_mem_new_i32(cpu_env,
124 offsetof(CPUSH4State, spc), "SPC");
125 cpu_gbr = tcg_global_mem_new_i32(cpu_env,
126 offsetof(CPUSH4State, gbr), "GBR");
127 cpu_vbr = tcg_global_mem_new_i32(cpu_env,
128 offsetof(CPUSH4State, vbr), "VBR");
129 cpu_sgr = tcg_global_mem_new_i32(cpu_env,
130 offsetof(CPUSH4State, sgr), "SGR");
131 cpu_dbr = tcg_global_mem_new_i32(cpu_env,
132 offsetof(CPUSH4State, dbr), "DBR");
133 cpu_mach = tcg_global_mem_new_i32(cpu_env,
134 offsetof(CPUSH4State, mach), "MACH");
135 cpu_macl = tcg_global_mem_new_i32(cpu_env,
136 offsetof(CPUSH4State, macl), "MACL");
137 cpu_pr = tcg_global_mem_new_i32(cpu_env,
138 offsetof(CPUSH4State, pr), "PR");
139 cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
140 offsetof(CPUSH4State, fpscr), "FPSCR");
141 cpu_fpul = tcg_global_mem_new_i32(cpu_env,
142 offsetof(CPUSH4State, fpul), "FPUL");
144 cpu_flags = tcg_global_mem_new_i32(cpu_env,
145 offsetof(CPUSH4State, flags), "_flags_");
146 cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
147 offsetof(CPUSH4State, delayed_pc),
148 "_delayed_pc_");
149 cpu_ldst = tcg_global_mem_new_i32(cpu_env,
150 offsetof(CPUSH4State, ldst), "_ldst_");
152 for (i = 0; i < 32; i++)
153 cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
154 offsetof(CPUSH4State, fregs[i]),
155 fregnames[i]);
157 done_init = 1;
/* Dump the SH-4 register state to F for "info registers" and logging. */
void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;
    /* SR must be reassembled from its split representation. */
    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    /* All 24 slots: R0-R15 plus the inactive bank of R0-R7 (printed as
       r16..r23). */
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    }
}
186 static void gen_read_sr(TCGv dst)
188 TCGv t0 = tcg_temp_new();
189 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
190 tcg_gen_or_i32(dst, dst, t0);
191 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
192 tcg_gen_or_i32(dst, dst, t0);
193 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
194 tcg_gen_or_i32(dst, cpu_sr, t0);
195 tcg_temp_free_i32(t0);
198 static void gen_write_sr(TCGv src)
200 tcg_gen_andi_i32(cpu_sr, src,
201 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
202 tcg_gen_shri_i32(cpu_sr_q, src, SR_Q);
203 tcg_gen_andi_i32(cpu_sr_q, cpu_sr_q, 1);
204 tcg_gen_shri_i32(cpu_sr_m, src, SR_M);
205 tcg_gen_andi_i32(cpu_sr_m, cpu_sr_m, 1);
206 tcg_gen_shri_i32(cpu_sr_t, src, SR_T);
207 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
210 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
212 if (unlikely(ctx->singlestep_enabled)) {
213 return false;
216 #ifndef CONFIG_USER_ONLY
217 return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
218 #else
219 return true;
220 #endif
223 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
225 if (use_goto_tb(ctx, dest)) {
226 /* Use a direct jump if in same page and singlestep not enabled */
227 tcg_gen_goto_tb(n);
228 tcg_gen_movi_i32(cpu_pc, dest);
229 tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
230 } else {
231 tcg_gen_movi_i32(cpu_pc, dest);
232 if (ctx->singlestep_enabled)
233 gen_helper_debug(cpu_env);
234 tcg_gen_exit_tb(0);
238 static void gen_jump(DisasContext * ctx)
240 if (ctx->delayed_pc == (uint32_t) - 1) {
241 /* Target is not statically known, it comes necessarily from a
242 delayed jump as immediate jump are conditinal jumps */
243 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
244 if (ctx->singlestep_enabled)
245 gen_helper_debug(cpu_env);
246 tcg_gen_exit_tb(0);
247 } else {
248 gen_goto_tb(ctx, 0, ctx->delayed_pc);
252 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
254 TCGLabel *label = gen_new_label();
255 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
256 tcg_gen_brcondi_i32(t ? TCG_COND_EQ : TCG_COND_NE, cpu_sr_t, 0, label);
257 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
258 gen_set_label(label);
261 /* Immediate conditional jump (bt or bf) */
262 static void gen_conditional_jump(DisasContext * ctx,
263 target_ulong ift, target_ulong ifnott)
265 TCGLabel *l1 = gen_new_label();
266 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1);
267 gen_goto_tb(ctx, 0, ifnott);
268 gen_set_label(l1);
269 gen_goto_tb(ctx, 1, ift);
272 /* Delayed conditional jump (bt or bf) */
273 static void gen_delayed_conditional_jump(DisasContext * ctx)
275 TCGLabel *l1;
276 TCGv ds;
278 l1 = gen_new_label();
279 ds = tcg_temp_new();
280 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
281 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
282 gen_goto_tb(ctx, 1, ctx->pc + 2);
283 gen_set_label(l1);
284 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
285 gen_jump(ctx);
/* Rewrite cpu_flags for the TB exit: keep only the latched
   DELAY_SLOT_TRUE bit and OR in the caller-supplied flags. */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
/* Load the double-precision pair into t: FR[reg] supplies the high
   32 bits, FR[reg + 1] the low 32 bits. */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
299 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
301 TCGv_i32 tmp = tcg_temp_new_i32();
302 tcg_gen_extrl_i64_i32(tmp, t);
303 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
304 tcg_gen_shri_i64(t, t, 32);
305 tcg_gen_extrl_i64_i32(tmp, t);
306 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
307 tcg_temp_free_i32(tmp);
/* Opcode field extraction helpers (SH-4 opcodes are 16 bits wide). */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
/* Sign-extended 8-bit immediate. */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit branch displacement, manually sign-extended. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* R0-R7 are banked: REG() selects the bank active for the current
   SR.MD/SR.RB combination, ALTREG() the inactive one. */
#define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
                && (ctx->flags & (1u << SR_RB))\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
                   || !(ctx->flags & (1u << SR_RB)))\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selectors: FPSCR.FR swaps the two FP banks.  XHACK
   remaps an XD pair index; XREG applies the bank swap on top of it. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise a slot-illegal exception when the current insn sits in a delay
   slot (it is forbidden there) and stop translating this TB.  These
   macros expand inside _decode_opc and use its ctx; note the bare
   'return' from the enclosing function. */
#define CHECK_NOT_DELAY_SLOT \
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        gen_helper_raise_slot_illegal_instruction(cpu_env);       \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }

/* Raise illegal-instruction (or slot-illegal inside a delay slot) when a
   privileged insn executes in user mode; stops translating this TB. */
#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                                           \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_illegal_instruction(cpu_env);   \
        } else {                                                  \
            gen_helper_raise_illegal_instruction(cpu_env);        \
        }                                                         \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }

/* Raise an FPU-disable exception (slot variant inside a delay slot) when
   SR.FD is set; stops translating this TB. */
#define CHECK_FPU_ENABLED \
    if (ctx->flags & (1u << SR_FD)) {                             \
        tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
        if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_fpu_disable(cpu_env);           \
        } else {                                                  \
            gen_helper_raise_fpu_disable(cpu_env);                \
        }                                                         \
        ctx->bstate = BS_BRANCH;                                  \
        return;                                                   \
    }
366 static void _decode_opc(DisasContext * ctx)
368 /* This code tries to make movcal emulation sufficiently
369 accurate for Linux purposes. This instruction writes
370 memory, and prior to that, always allocates a cache line.
371 It is used in two contexts:
372 - in memcpy, where data is copied in blocks, the first write
373 of to a block uses movca.l for performance.
374 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
375 to flush the cache. Here, the data written by movcal.l is never
376 written to memory, and the data written is just bogus.
378 To simulate this, we simulate movcal.l, we store the value to memory,
379 but we also remember the previous content. If we see ocbi, we check
380 if movcal.l for that address was done previously. If so, the write should
381 not have hit the memory, so we restore the previous content.
382 When we see an instruction that is neither movca.l
383 nor ocbi, the previous content is discarded.
385 To optimize, we only try to flush stores when we're at the start of
386 TB, or if we already saw movca.l in this TB and did not flush stores
387 yet. */
388 if (ctx->has_movcal)
390 int opcode = ctx->opcode & 0xf0ff;
391 if (opcode != 0x0093 /* ocbi */
392 && opcode != 0x00c3 /* movca.l */)
394 gen_helper_discard_movcal_backup(cpu_env);
395 ctx->has_movcal = 0;
399 #if 0
400 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
401 #endif
403 switch (ctx->opcode) {
404 case 0x0019: /* div0u */
405 tcg_gen_movi_i32(cpu_sr_m, 0);
406 tcg_gen_movi_i32(cpu_sr_q, 0);
407 tcg_gen_movi_i32(cpu_sr_t, 0);
408 return;
409 case 0x000b: /* rts */
410 CHECK_NOT_DELAY_SLOT
411 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
412 ctx->flags |= DELAY_SLOT;
413 ctx->delayed_pc = (uint32_t) - 1;
414 return;
415 case 0x0028: /* clrmac */
416 tcg_gen_movi_i32(cpu_mach, 0);
417 tcg_gen_movi_i32(cpu_macl, 0);
418 return;
419 case 0x0048: /* clrs */
420 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
421 return;
422 case 0x0008: /* clrt */
423 tcg_gen_movi_i32(cpu_sr_t, 0);
424 return;
425 case 0x0038: /* ldtlb */
426 CHECK_PRIVILEGED
427 gen_helper_ldtlb(cpu_env);
428 return;
429 case 0x002b: /* rte */
430 CHECK_PRIVILEGED
431 CHECK_NOT_DELAY_SLOT
432 gen_write_sr(cpu_ssr);
433 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
434 ctx->flags |= DELAY_SLOT;
435 ctx->delayed_pc = (uint32_t) - 1;
436 return;
437 case 0x0058: /* sets */
438 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
439 return;
440 case 0x0018: /* sett */
441 tcg_gen_movi_i32(cpu_sr_t, 1);
442 return;
443 case 0xfbfd: /* frchg */
444 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
445 ctx->bstate = BS_STOP;
446 return;
447 case 0xf3fd: /* fschg */
448 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
449 ctx->bstate = BS_STOP;
450 return;
451 case 0x0009: /* nop */
452 return;
453 case 0x001b: /* sleep */
454 CHECK_PRIVILEGED
455 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
456 gen_helper_sleep(cpu_env);
457 return;
460 switch (ctx->opcode & 0xf000) {
461 case 0x1000: /* mov.l Rm,@(disp,Rn) */
463 TCGv addr = tcg_temp_new();
464 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
465 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
466 tcg_temp_free(addr);
468 return;
469 case 0x5000: /* mov.l @(disp,Rm),Rn */
471 TCGv addr = tcg_temp_new();
472 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
473 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
474 tcg_temp_free(addr);
476 return;
477 case 0xe000: /* mov #imm,Rn */
478 tcg_gen_movi_i32(REG(B11_8), B7_0s);
479 return;
480 case 0x9000: /* mov.w @(disp,PC),Rn */
482 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
483 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
484 tcg_temp_free(addr);
486 return;
487 case 0xd000: /* mov.l @(disp,PC),Rn */
489 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
490 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
491 tcg_temp_free(addr);
493 return;
494 case 0x7000: /* add #imm,Rn */
495 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
496 return;
497 case 0xa000: /* bra disp */
498 CHECK_NOT_DELAY_SLOT
499 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
500 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
501 ctx->flags |= DELAY_SLOT;
502 return;
503 case 0xb000: /* bsr disp */
504 CHECK_NOT_DELAY_SLOT
505 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
506 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
507 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
508 ctx->flags |= DELAY_SLOT;
509 return;
512 switch (ctx->opcode & 0xf00f) {
513 case 0x6003: /* mov Rm,Rn */
514 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
515 return;
516 case 0x2000: /* mov.b Rm,@Rn */
517 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
518 return;
519 case 0x2001: /* mov.w Rm,@Rn */
520 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
521 return;
522 case 0x2002: /* mov.l Rm,@Rn */
523 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
524 return;
525 case 0x6000: /* mov.b @Rm,Rn */
526 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
527 return;
528 case 0x6001: /* mov.w @Rm,Rn */
529 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
530 return;
531 case 0x6002: /* mov.l @Rm,Rn */
532 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
533 return;
534 case 0x2004: /* mov.b Rm,@-Rn */
536 TCGv addr = tcg_temp_new();
537 tcg_gen_subi_i32(addr, REG(B11_8), 1);
538 /* might cause re-execution */
539 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
540 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
541 tcg_temp_free(addr);
543 return;
544 case 0x2005: /* mov.w Rm,@-Rn */
546 TCGv addr = tcg_temp_new();
547 tcg_gen_subi_i32(addr, REG(B11_8), 2);
548 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
549 tcg_gen_mov_i32(REG(B11_8), addr);
550 tcg_temp_free(addr);
552 return;
553 case 0x2006: /* mov.l Rm,@-Rn */
555 TCGv addr = tcg_temp_new();
556 tcg_gen_subi_i32(addr, REG(B11_8), 4);
557 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
558 tcg_gen_mov_i32(REG(B11_8), addr);
560 return;
561 case 0x6004: /* mov.b @Rm+,Rn */
562 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
563 if ( B11_8 != B7_4 )
564 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
565 return;
566 case 0x6005: /* mov.w @Rm+,Rn */
567 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
568 if ( B11_8 != B7_4 )
569 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
570 return;
571 case 0x6006: /* mov.l @Rm+,Rn */
572 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
573 if ( B11_8 != B7_4 )
574 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
575 return;
576 case 0x0004: /* mov.b Rm,@(R0,Rn) */
578 TCGv addr = tcg_temp_new();
579 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
580 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
581 tcg_temp_free(addr);
583 return;
584 case 0x0005: /* mov.w Rm,@(R0,Rn) */
586 TCGv addr = tcg_temp_new();
587 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
588 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
589 tcg_temp_free(addr);
591 return;
592 case 0x0006: /* mov.l Rm,@(R0,Rn) */
594 TCGv addr = tcg_temp_new();
595 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
596 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
597 tcg_temp_free(addr);
599 return;
600 case 0x000c: /* mov.b @(R0,Rm),Rn */
602 TCGv addr = tcg_temp_new();
603 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
604 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
605 tcg_temp_free(addr);
607 return;
608 case 0x000d: /* mov.w @(R0,Rm),Rn */
610 TCGv addr = tcg_temp_new();
611 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
612 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
613 tcg_temp_free(addr);
615 return;
616 case 0x000e: /* mov.l @(R0,Rm),Rn */
618 TCGv addr = tcg_temp_new();
619 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
620 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
621 tcg_temp_free(addr);
623 return;
624 case 0x6008: /* swap.b Rm,Rn */
626 TCGv low = tcg_temp_new();;
627 tcg_gen_ext16u_i32(low, REG(B7_4));
628 tcg_gen_bswap16_i32(low, low);
629 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
630 tcg_temp_free(low);
632 return;
633 case 0x6009: /* swap.w Rm,Rn */
634 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
635 return;
636 case 0x200d: /* xtrct Rm,Rn */
638 TCGv high, low;
639 high = tcg_temp_new();
640 tcg_gen_shli_i32(high, REG(B7_4), 16);
641 low = tcg_temp_new();
642 tcg_gen_shri_i32(low, REG(B11_8), 16);
643 tcg_gen_or_i32(REG(B11_8), high, low);
644 tcg_temp_free(low);
645 tcg_temp_free(high);
647 return;
648 case 0x300c: /* add Rm,Rn */
649 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
650 return;
651 case 0x300e: /* addc Rm,Rn */
653 TCGv t0, t1;
654 t0 = tcg_const_tl(0);
655 t1 = tcg_temp_new();
656 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
657 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
658 REG(B11_8), t0, t1, cpu_sr_t);
659 tcg_temp_free(t0);
660 tcg_temp_free(t1);
662 return;
663 case 0x300f: /* addv Rm,Rn */
665 TCGv t0, t1, t2;
666 t0 = tcg_temp_new();
667 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
668 t1 = tcg_temp_new();
669 tcg_gen_xor_i32(t1, t0, REG(B11_8));
670 t2 = tcg_temp_new();
671 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
672 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
673 tcg_temp_free(t2);
674 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
675 tcg_temp_free(t1);
676 tcg_gen_mov_i32(REG(B7_4), t0);
677 tcg_temp_free(t0);
679 return;
680 case 0x2009: /* and Rm,Rn */
681 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
682 return;
683 case 0x3000: /* cmp/eq Rm,Rn */
684 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
685 return;
686 case 0x3003: /* cmp/ge Rm,Rn */
687 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
688 return;
689 case 0x3007: /* cmp/gt Rm,Rn */
690 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
691 return;
692 case 0x3006: /* cmp/hi Rm,Rn */
693 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
694 return;
695 case 0x3002: /* cmp/hs Rm,Rn */
696 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
697 return;
698 case 0x200c: /* cmp/str Rm,Rn */
700 TCGv cmp1 = tcg_temp_new();
701 TCGv cmp2 = tcg_temp_new();
702 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
703 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
704 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
705 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
706 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
707 tcg_temp_free(cmp2);
708 tcg_temp_free(cmp1);
710 return;
711 case 0x2007: /* div0s Rm,Rn */
712 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
713 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
714 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
715 return;
716 case 0x3004: /* div1 Rm,Rn */
718 TCGv t0 = tcg_temp_new();
719 TCGv t1 = tcg_temp_new();
720 TCGv t2 = tcg_temp_new();
721 TCGv zero = tcg_const_i32(0);
723 /* shift left arg1, saving the bit being pushed out and inserting
724 T on the right */
725 tcg_gen_shri_i32(t0, REG(B11_8), 31);
726 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
727 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
729 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
730 using 64-bit temps, we compute arg0's high part from q ^ m, so
731 that it is 0x00000000 when adding the value or 0xffffffff when
732 subtracting it. */
733 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
734 tcg_gen_subi_i32(t1, t1, 1);
735 tcg_gen_neg_i32(t2, REG(B7_4));
736 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
737 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
739 /* compute T and Q depending on carry */
740 tcg_gen_andi_i32(t1, t1, 1);
741 tcg_gen_xor_i32(t1, t1, t0);
742 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
743 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
745 tcg_temp_free(zero);
746 tcg_temp_free(t2);
747 tcg_temp_free(t1);
748 tcg_temp_free(t0);
750 return;
751 case 0x300d: /* dmuls.l Rm,Rn */
752 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
753 return;
754 case 0x3005: /* dmulu.l Rm,Rn */
755 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
756 return;
757 case 0x600e: /* exts.b Rm,Rn */
758 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
759 return;
760 case 0x600f: /* exts.w Rm,Rn */
761 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
762 return;
763 case 0x600c: /* extu.b Rm,Rn */
764 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
765 return;
766 case 0x600d: /* extu.w Rm,Rn */
767 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
768 return;
769 case 0x000f: /* mac.l @Rm+,@Rn+ */
771 TCGv arg0, arg1;
772 arg0 = tcg_temp_new();
773 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
774 arg1 = tcg_temp_new();
775 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
776 gen_helper_macl(cpu_env, arg0, arg1);
777 tcg_temp_free(arg1);
778 tcg_temp_free(arg0);
779 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
780 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
782 return;
783 case 0x400f: /* mac.w @Rm+,@Rn+ */
785 TCGv arg0, arg1;
786 arg0 = tcg_temp_new();
787 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
788 arg1 = tcg_temp_new();
789 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
790 gen_helper_macw(cpu_env, arg0, arg1);
791 tcg_temp_free(arg1);
792 tcg_temp_free(arg0);
793 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
794 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
796 return;
797 case 0x0007: /* mul.l Rm,Rn */
798 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
799 return;
800 case 0x200f: /* muls.w Rm,Rn */
802 TCGv arg0, arg1;
803 arg0 = tcg_temp_new();
804 tcg_gen_ext16s_i32(arg0, REG(B7_4));
805 arg1 = tcg_temp_new();
806 tcg_gen_ext16s_i32(arg1, REG(B11_8));
807 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
808 tcg_temp_free(arg1);
809 tcg_temp_free(arg0);
811 return;
812 case 0x200e: /* mulu.w Rm,Rn */
814 TCGv arg0, arg1;
815 arg0 = tcg_temp_new();
816 tcg_gen_ext16u_i32(arg0, REG(B7_4));
817 arg1 = tcg_temp_new();
818 tcg_gen_ext16u_i32(arg1, REG(B11_8));
819 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
820 tcg_temp_free(arg1);
821 tcg_temp_free(arg0);
823 return;
824 case 0x600b: /* neg Rm,Rn */
825 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
826 return;
827 case 0x600a: /* negc Rm,Rn */
829 TCGv t0 = tcg_const_i32(0);
830 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
831 REG(B7_4), t0, cpu_sr_t, t0);
832 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
833 t0, t0, REG(B11_8), cpu_sr_t);
834 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
835 tcg_temp_free(t0);
837 return;
838 case 0x6007: /* not Rm,Rn */
839 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
840 return;
841 case 0x200b: /* or Rm,Rn */
842 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
843 return;
844 case 0x400c: /* shad Rm,Rn */
846 TCGv t0 = tcg_temp_new();
847 TCGv t1 = tcg_temp_new();
848 TCGv t2 = tcg_temp_new();
850 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
852 /* positive case: shift to the left */
853 tcg_gen_shl_i32(t1, REG(B11_8), t0);
855 /* negative case: shift to the right in two steps to
856 correctly handle the -32 case */
857 tcg_gen_xori_i32(t0, t0, 0x1f);
858 tcg_gen_sar_i32(t2, REG(B11_8), t0);
859 tcg_gen_sari_i32(t2, t2, 1);
861 /* select between the two cases */
862 tcg_gen_movi_i32(t0, 0);
863 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
865 tcg_temp_free(t0);
866 tcg_temp_free(t1);
867 tcg_temp_free(t2);
869 return;
870 case 0x400d: /* shld Rm,Rn */
872 TCGv t0 = tcg_temp_new();
873 TCGv t1 = tcg_temp_new();
874 TCGv t2 = tcg_temp_new();
876 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
878 /* positive case: shift to the left */
879 tcg_gen_shl_i32(t1, REG(B11_8), t0);
881 /* negative case: shift to the right in two steps to
882 correctly handle the -32 case */
883 tcg_gen_xori_i32(t0, t0, 0x1f);
884 tcg_gen_shr_i32(t2, REG(B11_8), t0);
885 tcg_gen_shri_i32(t2, t2, 1);
887 /* select between the two cases */
888 tcg_gen_movi_i32(t0, 0);
889 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
891 tcg_temp_free(t0);
892 tcg_temp_free(t1);
893 tcg_temp_free(t2);
895 return;
896 case 0x3008: /* sub Rm,Rn */
897 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
898 return;
899 case 0x300a: /* subc Rm,Rn */
901 TCGv t0, t1;
902 t0 = tcg_const_tl(0);
903 t1 = tcg_temp_new();
904 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
905 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
906 REG(B11_8), t0, t1, cpu_sr_t);
907 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
908 tcg_temp_free(t0);
909 tcg_temp_free(t1);
911 return;
912 case 0x300b: /* subv Rm,Rn */
914 TCGv t0, t1, t2;
915 t0 = tcg_temp_new();
916 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
917 t1 = tcg_temp_new();
918 tcg_gen_xor_i32(t1, t0, REG(B7_4));
919 t2 = tcg_temp_new();
920 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
921 tcg_gen_and_i32(t1, t1, t2);
922 tcg_temp_free(t2);
923 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
924 tcg_temp_free(t1);
925 tcg_gen_mov_i32(REG(B11_8), t0);
926 tcg_temp_free(t0);
928 return;
929 case 0x2008: /* tst Rm,Rn */
931 TCGv val = tcg_temp_new();
932 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
933 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
934 tcg_temp_free(val);
936 return;
937 case 0x200a: /* xor Rm,Rn */
938 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
939 return;
940 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
941 CHECK_FPU_ENABLED
942 if (ctx->flags & FPSCR_SZ) {
943 TCGv_i64 fp = tcg_temp_new_i64();
944 gen_load_fpr64(fp, XREG(B7_4));
945 gen_store_fpr64(fp, XREG(B11_8));
946 tcg_temp_free_i64(fp);
947 } else {
948 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
950 return;
951 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
952 CHECK_FPU_ENABLED
953 if (ctx->flags & FPSCR_SZ) {
954 TCGv addr_hi = tcg_temp_new();
955 int fr = XREG(B7_4);
956 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
957 tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
958 ctx->memidx, MO_TEUL);
959 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
960 ctx->memidx, MO_TEUL);
961 tcg_temp_free(addr_hi);
962 } else {
963 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
964 ctx->memidx, MO_TEUL);
966 return;
967 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
968 CHECK_FPU_ENABLED
969 if (ctx->flags & FPSCR_SZ) {
970 TCGv addr_hi = tcg_temp_new();
971 int fr = XREG(B11_8);
972 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
973 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
974 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
975 tcg_temp_free(addr_hi);
976 } else {
977 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
978 ctx->memidx, MO_TEUL);
980 return;
981 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
982 CHECK_FPU_ENABLED
983 if (ctx->flags & FPSCR_SZ) {
984 TCGv addr_hi = tcg_temp_new();
985 int fr = XREG(B11_8);
986 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
987 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
988 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
989 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
990 tcg_temp_free(addr_hi);
991 } else {
992 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
993 ctx->memidx, MO_TEUL);
994 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
996 return;
997 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
998 CHECK_FPU_ENABLED
999 TCGv addr = tcg_temp_new_i32();
1000 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1001 if (ctx->flags & FPSCR_SZ) {
1002 int fr = XREG(B7_4);
1003 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
1004 tcg_gen_subi_i32(addr, addr, 4);
1005 tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
1006 } else {
1007 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1008 ctx->memidx, MO_TEUL);
1010 tcg_gen_mov_i32(REG(B11_8), addr);
1011 tcg_temp_free(addr);
1012 return;
1013 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1014 CHECK_FPU_ENABLED
1016 TCGv addr = tcg_temp_new_i32();
1017 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1018 if (ctx->flags & FPSCR_SZ) {
1019 int fr = XREG(B11_8);
1020 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1021 ctx->memidx, MO_TEUL);
1022 tcg_gen_addi_i32(addr, addr, 4);
1023 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1024 ctx->memidx, MO_TEUL);
1025 } else {
1026 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
1027 ctx->memidx, MO_TEUL);
1029 tcg_temp_free(addr);
1031 return;
1032 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1033 CHECK_FPU_ENABLED
1035 TCGv addr = tcg_temp_new();
1036 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1037 if (ctx->flags & FPSCR_SZ) {
1038 int fr = XREG(B7_4);
1039 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1040 ctx->memidx, MO_TEUL);
1041 tcg_gen_addi_i32(addr, addr, 4);
1042 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1043 ctx->memidx, MO_TEUL);
1044 } else {
1045 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1046 ctx->memidx, MO_TEUL);
1048 tcg_temp_free(addr);
1050 return;
1051 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1052 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1053 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1054 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1055 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1056 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1058 CHECK_FPU_ENABLED
1059 if (ctx->flags & FPSCR_PR) {
1060 TCGv_i64 fp0, fp1;
1062 if (ctx->opcode & 0x0110)
1063 break; /* illegal instruction */
1064 fp0 = tcg_temp_new_i64();
1065 fp1 = tcg_temp_new_i64();
1066 gen_load_fpr64(fp0, DREG(B11_8));
1067 gen_load_fpr64(fp1, DREG(B7_4));
1068 switch (ctx->opcode & 0xf00f) {
1069 case 0xf000: /* fadd Rm,Rn */
1070 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1071 break;
1072 case 0xf001: /* fsub Rm,Rn */
1073 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1074 break;
1075 case 0xf002: /* fmul Rm,Rn */
1076 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1077 break;
1078 case 0xf003: /* fdiv Rm,Rn */
1079 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1080 break;
1081 case 0xf004: /* fcmp/eq Rm,Rn */
1082 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1083 return;
1084 case 0xf005: /* fcmp/gt Rm,Rn */
1085 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1086 return;
1088 gen_store_fpr64(fp0, DREG(B11_8));
1089 tcg_temp_free_i64(fp0);
1090 tcg_temp_free_i64(fp1);
1091 } else {
1092 switch (ctx->opcode & 0xf00f) {
1093 case 0xf000: /* fadd Rm,Rn */
1094 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1095 cpu_fregs[FREG(B11_8)],
1096 cpu_fregs[FREG(B7_4)]);
1097 break;
1098 case 0xf001: /* fsub Rm,Rn */
1099 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1100 cpu_fregs[FREG(B11_8)],
1101 cpu_fregs[FREG(B7_4)]);
1102 break;
1103 case 0xf002: /* fmul Rm,Rn */
1104 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1105 cpu_fregs[FREG(B11_8)],
1106 cpu_fregs[FREG(B7_4)]);
1107 break;
1108 case 0xf003: /* fdiv Rm,Rn */
1109 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1110 cpu_fregs[FREG(B11_8)],
1111 cpu_fregs[FREG(B7_4)]);
1112 break;
1113 case 0xf004: /* fcmp/eq Rm,Rn */
1114 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1115 cpu_fregs[FREG(B7_4)]);
1116 return;
1117 case 0xf005: /* fcmp/gt Rm,Rn */
1118 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1119 cpu_fregs[FREG(B7_4)]);
1120 return;
1124 return;
1125 case 0xf00e: /* fmac FR0,RM,Rn */
1127 CHECK_FPU_ENABLED
1128 if (ctx->flags & FPSCR_PR) {
1129 break; /* illegal instruction */
1130 } else {
1131 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1132 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1133 cpu_fregs[FREG(B11_8)]);
1134 return;
1139 switch (ctx->opcode & 0xff00) {
1140 case 0xc900: /* and #imm,R0 */
1141 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1142 return;
1143 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1145 TCGv addr, val;
1146 addr = tcg_temp_new();
1147 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1148 val = tcg_temp_new();
1149 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1150 tcg_gen_andi_i32(val, val, B7_0);
1151 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1152 tcg_temp_free(val);
1153 tcg_temp_free(addr);
1155 return;
1156 case 0x8b00: /* bf label */
1157 CHECK_NOT_DELAY_SLOT
1158 gen_conditional_jump(ctx, ctx->pc + 2,
1159 ctx->pc + 4 + B7_0s * 2);
1160 ctx->bstate = BS_BRANCH;
1161 return;
1162 case 0x8f00: /* bf/s label */
1163 CHECK_NOT_DELAY_SLOT
1164 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1165 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1166 return;
1167 case 0x8900: /* bt label */
1168 CHECK_NOT_DELAY_SLOT
1169 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1170 ctx->pc + 2);
1171 ctx->bstate = BS_BRANCH;
1172 return;
1173 case 0x8d00: /* bt/s label */
1174 CHECK_NOT_DELAY_SLOT
1175 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1176 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1177 return;
1178 case 0x8800: /* cmp/eq #imm,R0 */
1179 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1180 return;
1181 case 0xc400: /* mov.b @(disp,GBR),R0 */
1183 TCGv addr = tcg_temp_new();
1184 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1185 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1186 tcg_temp_free(addr);
1188 return;
1189 case 0xc500: /* mov.w @(disp,GBR),R0 */
1191 TCGv addr = tcg_temp_new();
1192 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1193 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1194 tcg_temp_free(addr);
1196 return;
1197 case 0xc600: /* mov.l @(disp,GBR),R0 */
1199 TCGv addr = tcg_temp_new();
1200 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1201 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1202 tcg_temp_free(addr);
1204 return;
1205 case 0xc000: /* mov.b R0,@(disp,GBR) */
1207 TCGv addr = tcg_temp_new();
1208 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1209 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1210 tcg_temp_free(addr);
1212 return;
1213 case 0xc100: /* mov.w R0,@(disp,GBR) */
1215 TCGv addr = tcg_temp_new();
1216 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1217 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1218 tcg_temp_free(addr);
1220 return;
1221 case 0xc200: /* mov.l R0,@(disp,GBR) */
1223 TCGv addr = tcg_temp_new();
1224 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1225 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1226 tcg_temp_free(addr);
1228 return;
1229 case 0x8000: /* mov.b R0,@(disp,Rn) */
1231 TCGv addr = tcg_temp_new();
1232 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1233 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1234 tcg_temp_free(addr);
1236 return;
1237 case 0x8100: /* mov.w R0,@(disp,Rn) */
1239 TCGv addr = tcg_temp_new();
1240 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1241 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1242 tcg_temp_free(addr);
1244 return;
1245 case 0x8400: /* mov.b @(disp,Rn),R0 */
1247 TCGv addr = tcg_temp_new();
1248 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1249 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1250 tcg_temp_free(addr);
1252 return;
1253 case 0x8500: /* mov.w @(disp,Rn),R0 */
1255 TCGv addr = tcg_temp_new();
1256 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1257 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1258 tcg_temp_free(addr);
1260 return;
1261 case 0xc700: /* mova @(disp,PC),R0 */
1262 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1263 return;
1264 case 0xcb00: /* or #imm,R0 */
1265 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1266 return;
1267 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1269 TCGv addr, val;
1270 addr = tcg_temp_new();
1271 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1272 val = tcg_temp_new();
1273 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1274 tcg_gen_ori_i32(val, val, B7_0);
1275 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1276 tcg_temp_free(val);
1277 tcg_temp_free(addr);
1279 return;
1280 case 0xc300: /* trapa #imm */
1282 TCGv imm;
1283 CHECK_NOT_DELAY_SLOT
1284 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1285 imm = tcg_const_i32(B7_0);
1286 gen_helper_trapa(cpu_env, imm);
1287 tcg_temp_free(imm);
1288 ctx->bstate = BS_BRANCH;
1290 return;
1291 case 0xc800: /* tst #imm,R0 */
1293 TCGv val = tcg_temp_new();
1294 tcg_gen_andi_i32(val, REG(0), B7_0);
1295 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1296 tcg_temp_free(val);
1298 return;
1299 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1301 TCGv val = tcg_temp_new();
1302 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1303 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1304 tcg_gen_andi_i32(val, val, B7_0);
1305 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1306 tcg_temp_free(val);
1308 return;
1309 case 0xca00: /* xor #imm,R0 */
1310 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1311 return;
1312 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1314 TCGv addr, val;
1315 addr = tcg_temp_new();
1316 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1317 val = tcg_temp_new();
1318 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1319 tcg_gen_xori_i32(val, val, B7_0);
1320 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1321 tcg_temp_free(val);
1322 tcg_temp_free(addr);
1324 return;
1327 switch (ctx->opcode & 0xf08f) {
1328 case 0x408e: /* ldc Rm,Rn_BANK */
1329 CHECK_PRIVILEGED
1330 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1331 return;
1332 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1333 CHECK_PRIVILEGED
1334 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1335 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1336 return;
1337 case 0x0082: /* stc Rm_BANK,Rn */
1338 CHECK_PRIVILEGED
1339 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1340 return;
1341 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1342 CHECK_PRIVILEGED
1344 TCGv addr = tcg_temp_new();
1345 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1346 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1347 tcg_gen_mov_i32(REG(B11_8), addr);
1348 tcg_temp_free(addr);
1350 return;
1353 switch (ctx->opcode & 0xf0ff) {
1354 case 0x0023: /* braf Rn */
1355 CHECK_NOT_DELAY_SLOT
1356 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1357 ctx->flags |= DELAY_SLOT;
1358 ctx->delayed_pc = (uint32_t) - 1;
1359 return;
1360 case 0x0003: /* bsrf Rn */
1361 CHECK_NOT_DELAY_SLOT
1362 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1363 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1364 ctx->flags |= DELAY_SLOT;
1365 ctx->delayed_pc = (uint32_t) - 1;
1366 return;
1367 case 0x4015: /* cmp/pl Rn */
1368 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1369 return;
1370 case 0x4011: /* cmp/pz Rn */
1371 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1372 return;
1373 case 0x4010: /* dt Rn */
1374 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1375 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1376 return;
1377 case 0x402b: /* jmp @Rn */
1378 CHECK_NOT_DELAY_SLOT
1379 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1380 ctx->flags |= DELAY_SLOT;
1381 ctx->delayed_pc = (uint32_t) - 1;
1382 return;
1383 case 0x400b: /* jsr @Rn */
1384 CHECK_NOT_DELAY_SLOT
1385 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1386 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1387 ctx->flags |= DELAY_SLOT;
1388 ctx->delayed_pc = (uint32_t) - 1;
1389 return;
1390 case 0x400e: /* ldc Rm,SR */
1391 CHECK_PRIVILEGED
1393 TCGv val = tcg_temp_new();
1394 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1395 gen_write_sr(val);
1396 tcg_temp_free(val);
1397 ctx->bstate = BS_STOP;
1399 return;
1400 case 0x4007: /* ldc.l @Rm+,SR */
1401 CHECK_PRIVILEGED
1403 TCGv val = tcg_temp_new();
1404 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1405 tcg_gen_andi_i32(val, val, 0x700083f3);
1406 gen_write_sr(val);
1407 tcg_temp_free(val);
1408 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1409 ctx->bstate = BS_STOP;
1411 return;
1412 case 0x0002: /* stc SR,Rn */
1413 CHECK_PRIVILEGED
1414 gen_read_sr(REG(B11_8));
1415 return;
1416 case 0x4003: /* stc SR,@-Rn */
1417 CHECK_PRIVILEGED
1419 TCGv addr = tcg_temp_new();
1420 TCGv val = tcg_temp_new();
1421 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1422 gen_read_sr(val);
1423 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1424 tcg_gen_mov_i32(REG(B11_8), addr);
1425 tcg_temp_free(val);
1426 tcg_temp_free(addr);
1428 return;
1429 #define LD(reg,ldnum,ldpnum,prechk) \
1430 case ldnum: \
1431 prechk \
1432 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1433 return; \
1434 case ldpnum: \
1435 prechk \
1436 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1437 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1438 return;
1439 #define ST(reg,stnum,stpnum,prechk) \
1440 case stnum: \
1441 prechk \
1442 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1443 return; \
1444 case stpnum: \
1445 prechk \
1447 TCGv addr = tcg_temp_new(); \
1448 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1449 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1450 tcg_gen_mov_i32(REG(B11_8), addr); \
1451 tcg_temp_free(addr); \
1453 return;
1454 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1455 LD(reg,ldnum,ldpnum,prechk) \
1456 ST(reg,stnum,stpnum,prechk)
1457 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1458 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1459 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1460 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1461 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1462 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1463 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1464 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1465 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1466 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1467 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1468 case 0x406a: /* lds Rm,FPSCR */
1469 CHECK_FPU_ENABLED
1470 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1471 ctx->bstate = BS_STOP;
1472 return;
1473 case 0x4066: /* lds.l @Rm+,FPSCR */
1474 CHECK_FPU_ENABLED
1476 TCGv addr = tcg_temp_new();
1477 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1478 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1479 gen_helper_ld_fpscr(cpu_env, addr);
1480 tcg_temp_free(addr);
1481 ctx->bstate = BS_STOP;
1483 return;
1484 case 0x006a: /* sts FPSCR,Rn */
1485 CHECK_FPU_ENABLED
1486 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1487 return;
1488 case 0x4062: /* sts FPSCR,@-Rn */
1489 CHECK_FPU_ENABLED
1491 TCGv addr, val;
1492 val = tcg_temp_new();
1493 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1494 addr = tcg_temp_new();
1495 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1496 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1497 tcg_gen_mov_i32(REG(B11_8), addr);
1498 tcg_temp_free(addr);
1499 tcg_temp_free(val);
1501 return;
1502 case 0x00c3: /* movca.l R0,@Rm */
1504 TCGv val = tcg_temp_new();
1505 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1506 gen_helper_movcal(cpu_env, REG(B11_8), val);
1507 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1509 ctx->has_movcal = 1;
1510 return;
1511 case 0x40a9:
1512 /* MOVUA.L @Rm,R0 (Rm) -> R0
1513 Load non-boundary-aligned data */
1514 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1515 return;
1516 case 0x40e9:
1517 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1518 Load non-boundary-aligned data */
1519 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1520 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1521 return;
1522 case 0x0029: /* movt Rn */
1523 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1524 return;
1525 case 0x0073:
1526 /* MOVCO.L
1527 LDST -> T
1528 If (T == 1) R0 -> (Rn)
1529 0 -> LDST
1531 if (ctx->features & SH_FEATURE_SH4A) {
1532 TCGLabel *label = gen_new_label();
1533 tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
1534 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1535 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1536 gen_set_label(label);
1537 tcg_gen_movi_i32(cpu_ldst, 0);
1538 return;
1539 } else
1540 break;
1541 case 0x0063:
1542 /* MOVLI.L @Rm,R0
1543 1 -> LDST
1544 (Rm) -> R0
1545 When interrupt/exception
1546 occurred 0 -> LDST
1548 if (ctx->features & SH_FEATURE_SH4A) {
1549 tcg_gen_movi_i32(cpu_ldst, 0);
1550 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1551 tcg_gen_movi_i32(cpu_ldst, 1);
1552 return;
1553 } else
1554 break;
1555 case 0x0093: /* ocbi @Rn */
1557 gen_helper_ocbi(cpu_env, REG(B11_8));
1559 return;
1560 case 0x00a3: /* ocbp @Rn */
1561 case 0x00b3: /* ocbwb @Rn */
1562 /* These instructions are supposed to do nothing in case of
1563 a cache miss. Given that we only partially emulate caches
1564 it is safe to simply ignore them. */
1565 return;
1566 case 0x0083: /* pref @Rn */
1567 return;
1568 case 0x00d3: /* prefi @Rn */
1569 if (ctx->features & SH_FEATURE_SH4A)
1570 return;
1571 else
1572 break;
1573 case 0x00e3: /* icbi @Rn */
1574 if (ctx->features & SH_FEATURE_SH4A)
1575 return;
1576 else
1577 break;
1578 case 0x00ab: /* synco */
1579 if (ctx->features & SH_FEATURE_SH4A)
1580 return;
1581 else
1582 break;
1583 case 0x4024: /* rotcl Rn */
1585 TCGv tmp = tcg_temp_new();
1586 tcg_gen_mov_i32(tmp, cpu_sr_t);
1587 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1588 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1589 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1590 tcg_temp_free(tmp);
1592 return;
1593 case 0x4025: /* rotcr Rn */
1595 TCGv tmp = tcg_temp_new();
1596 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1597 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1598 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1599 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1600 tcg_temp_free(tmp);
1602 return;
1603 case 0x4004: /* rotl Rn */
1604 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1605 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1606 return;
1607 case 0x4005: /* rotr Rn */
1608 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1609 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1610 return;
1611 case 0x4000: /* shll Rn */
1612 case 0x4020: /* shal Rn */
1613 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1614 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1615 return;
1616 case 0x4021: /* shar Rn */
1617 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1618 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1619 return;
1620 case 0x4001: /* shlr Rn */
1621 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1622 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1623 return;
1624 case 0x4008: /* shll2 Rn */
1625 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1626 return;
1627 case 0x4018: /* shll8 Rn */
1628 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1629 return;
1630 case 0x4028: /* shll16 Rn */
1631 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1632 return;
1633 case 0x4009: /* shlr2 Rn */
1634 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1635 return;
1636 case 0x4019: /* shlr8 Rn */
1637 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1638 return;
1639 case 0x4029: /* shlr16 Rn */
1640 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1641 return;
1642 case 0x401b: /* tas.b @Rn */
1644 TCGv addr, val;
1645 addr = tcg_temp_local_new();
1646 tcg_gen_mov_i32(addr, REG(B11_8));
1647 val = tcg_temp_local_new();
1648 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1649 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1650 tcg_gen_ori_i32(val, val, 0x80);
1651 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1652 tcg_temp_free(val);
1653 tcg_temp_free(addr);
1655 return;
1656 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1657 CHECK_FPU_ENABLED
1658 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1659 return;
1660 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1661 CHECK_FPU_ENABLED
1662 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1663 return;
1664 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1665 CHECK_FPU_ENABLED
1666 if (ctx->flags & FPSCR_PR) {
1667 TCGv_i64 fp;
1668 if (ctx->opcode & 0x0100)
1669 break; /* illegal instruction */
1670 fp = tcg_temp_new_i64();
1671 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1672 gen_store_fpr64(fp, DREG(B11_8));
1673 tcg_temp_free_i64(fp);
1675 else {
1676 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1678 return;
1679 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1680 CHECK_FPU_ENABLED
1681 if (ctx->flags & FPSCR_PR) {
1682 TCGv_i64 fp;
1683 if (ctx->opcode & 0x0100)
1684 break; /* illegal instruction */
1685 fp = tcg_temp_new_i64();
1686 gen_load_fpr64(fp, DREG(B11_8));
1687 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1688 tcg_temp_free_i64(fp);
1690 else {
1691 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1693 return;
1694 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1695 CHECK_FPU_ENABLED
1697 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1699 return;
1700 case 0xf05d: /* fabs FRn/DRn */
1701 CHECK_FPU_ENABLED
1702 if (ctx->flags & FPSCR_PR) {
1703 if (ctx->opcode & 0x0100)
1704 break; /* illegal instruction */
1705 TCGv_i64 fp = tcg_temp_new_i64();
1706 gen_load_fpr64(fp, DREG(B11_8));
1707 gen_helper_fabs_DT(fp, fp);
1708 gen_store_fpr64(fp, DREG(B11_8));
1709 tcg_temp_free_i64(fp);
1710 } else {
1711 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1713 return;
1714 case 0xf06d: /* fsqrt FRn */
1715 CHECK_FPU_ENABLED
1716 if (ctx->flags & FPSCR_PR) {
1717 if (ctx->opcode & 0x0100)
1718 break; /* illegal instruction */
1719 TCGv_i64 fp = tcg_temp_new_i64();
1720 gen_load_fpr64(fp, DREG(B11_8));
1721 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1722 gen_store_fpr64(fp, DREG(B11_8));
1723 tcg_temp_free_i64(fp);
1724 } else {
1725 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1726 cpu_fregs[FREG(B11_8)]);
1728 return;
1729 case 0xf07d: /* fsrra FRn */
1730 CHECK_FPU_ENABLED
1731 break;
1732 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1733 CHECK_FPU_ENABLED
1734 if (!(ctx->flags & FPSCR_PR)) {
1735 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1737 return;
1738 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1739 CHECK_FPU_ENABLED
1740 if (!(ctx->flags & FPSCR_PR)) {
1741 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1743 return;
1744 case 0xf0ad: /* fcnvsd FPUL,DRn */
1745 CHECK_FPU_ENABLED
1747 TCGv_i64 fp = tcg_temp_new_i64();
1748 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1749 gen_store_fpr64(fp, DREG(B11_8));
1750 tcg_temp_free_i64(fp);
1752 return;
1753 case 0xf0bd: /* fcnvds DRn,FPUL */
1754 CHECK_FPU_ENABLED
1756 TCGv_i64 fp = tcg_temp_new_i64();
1757 gen_load_fpr64(fp, DREG(B11_8));
1758 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1759 tcg_temp_free_i64(fp);
1761 return;
1762 case 0xf0ed: /* fipr FVm,FVn */
1763 CHECK_FPU_ENABLED
1764 if ((ctx->flags & FPSCR_PR) == 0) {
1765 TCGv m, n;
1766 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1767 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1768 gen_helper_fipr(cpu_env, m, n);
1769 tcg_temp_free(m);
1770 tcg_temp_free(n);
1771 return;
1773 break;
1774 case 0xf0fd: /* ftrv XMTRX,FVn */
1775 CHECK_FPU_ENABLED
1776 if ((ctx->opcode & 0x0300) == 0x0100 &&
1777 (ctx->flags & FPSCR_PR) == 0) {
1778 TCGv n;
1779 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1780 gen_helper_ftrv(cpu_env, n);
1781 tcg_temp_free(n);
1782 return;
1784 break;
1786 #if 0
1787 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1788 ctx->opcode, ctx->pc);
1789 fflush(stderr);
1790 #endif
1791 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1792 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1793 gen_helper_raise_slot_illegal_instruction(cpu_env);
1794 } else {
1795 gen_helper_raise_illegal_instruction(cpu_env);
1797 ctx->bstate = BS_BRANCH;
/*
 * Translate one guest instruction and manage the SH4 delay-slot state
 * machine around it.
 *
 * ctx->flags is sampled before and after _decode_opc():
 *  - If the PREVIOUS instruction opened a delay slot (old_flags has
 *    DELAY_SLOT or DELAY_SLOT_CONDITIONAL set), then the instruction just
 *    translated WAS the delay slot: clear the in-slot flags (either wholesale
 *    via DELAY_SLOT_CLEARME or by masking just the two slot bits), emit the
 *    pending jump (conditional or unconditional), and end the TB (BS_BRANCH).
 *  - If THIS instruction opened a delay slot (flags gained a slot bit),
 *    persist the flags to the CPU state so an exception raised inside the
 *    slot observes the correct in-delay-slot status.
 */
1800 static void decode_opc(DisasContext * ctx)
1802 uint32_t old_flags = ctx->flags;
1804 _decode_opc(ctx);
1806 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1807 if (ctx->flags & DELAY_SLOT_CLEARME) {
1808 gen_store_flags(0);
1809 } else {
1810 /* go out of the delay slot */
1811 uint32_t new_flags = ctx->flags;
1812 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1813 gen_store_flags(new_flags);
/* The delayed branch always terminates the TB. */
1815 ctx->flags = 0;
1816 ctx->bstate = BS_BRANCH;
1817 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1818 gen_delayed_conditional_jump(ctx);
1819 } else if (old_flags & DELAY_SLOT) {
1820 gen_jump(ctx);
1825 /* go into a delay slot */
1826 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1827 gen_store_flags(ctx->flags);
/*
 * Translate a block of SH4 guest code into a TCG TranslationBlock.
 *
 * Initializes a DisasContext from tb->flags, then translates instructions
 * (2 bytes each) until a branch/exception (ctx.bstate != BS_NONE), the TCG
 * op buffer fills, a page boundary is crossed, the instruction budget is
 * exhausted, or single-stepping forces a stop.  Finally emits the TB
 * epilogue appropriate for how translation ended, and records tb->size /
 * tb->icount for the generated block.
 */
1830 void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
1832 SuperHCPU *cpu = sh_env_get_cpu(env);
1833 CPUState *cs = CPU(cpu);
1834 DisasContext ctx;
1835 target_ulong pc_start;
1836 int num_insns;
1837 int max_insns;
1839 pc_start = tb->pc;
1840 ctx.pc = pc_start;
1841 ctx.flags = (uint32_t)tb->flags;
1842 ctx.bstate = BS_NONE;
/* User (non-SR_MD) mode uses mem index 1, privileged mode index 0. */
1843 ctx.memidx = (ctx.flags & (1u << SR_MD)) == 0 ? 1 : 0;
1844 /* We don't know if the delayed pc came from a dynamic or static branch,
1845 so assume it is a dynamic branch. */
1846 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1847 ctx.tb = tb;
1848 ctx.singlestep_enabled = cs->singlestep_enabled;
1849 ctx.features = env->features;
1850 ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);
/* Clamp the per-TB instruction budget from cflags to TCG's maximum. */
1852 num_insns = 0;
1853 max_insns = tb->cflags & CF_COUNT_MASK;
1854 if (max_insns == 0) {
1855 max_insns = CF_COUNT_MASK;
1857 if (max_insns > TCG_MAX_INSNS) {
1858 max_insns = TCG_MAX_INSNS;
1861 gen_tb_start(tb);
/* Main translation loop: one 16-bit SH4 instruction per iteration. */
1862 while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
1863 tcg_gen_insn_start(ctx.pc, ctx.flags);
1864 num_insns++;
1866 if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
1867 /* We have hit a breakpoint - make sure PC is up-to-date */
1868 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1869 gen_helper_debug(cpu_env);
1870 ctx.bstate = BS_BRANCH;
1871 /* The address covered by the breakpoint must be included in
1872 [tb->pc, tb->pc + tb->size) in order to for it to be
1873 properly cleared -- thus we increment the PC here so that
1874 the logic setting tb->size below does the right thing. */
1875 ctx.pc += 2;
1876 break;
/* Last instruction of an icount TB may do I/O; bracket it. */
1879 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
1880 gen_io_start();
1883 ctx.opcode = cpu_lduw_code(env, ctx.pc);
1884 decode_opc(&ctx);
1885 ctx.pc += 2;
/* Never let a TB span a guest page boundary. */
1886 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1887 break;
1888 if (cs->singlestep_enabled) {
1889 break;
1891 if (num_insns >= max_insns)
1892 break;
1893 if (singlestep)
1894 break;
1896 if (tb->cflags & CF_LAST_IO)
1897 gen_io_end();
1898 if (cs->singlestep_enabled) {
/* Single-step: report the stop PC to the debugger helper. */
1899 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1900 gen_helper_debug(cpu_env);
1901 } else {
1902 switch (ctx.bstate) {
1903 case BS_STOP:
1904 /* gen_op_interrupt_restart(); */
1905 /* fall through */
1906 case BS_NONE:
1907 if (ctx.flags) {
1908 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1910 gen_goto_tb(&ctx, 0, ctx.pc);
1911 break;
1912 case BS_EXCP:
1913 /* gen_op_interrupt_restart(); */
1914 tcg_gen_exit_tb(0);
1915 break;
1916 case BS_BRANCH:
1917 default:
/* Branch cases already emitted their own TB exit. */
1918 break;
1922 gen_tb_end(tb, num_insns);
1924 tb->size = ctx.pc - pc_start;
1925 tb->icount = num_insns;
1927 #ifdef DEBUG_DISAS
1928 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1929 && qemu_log_in_addr_range(pc_start)) {
1930 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1931 log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
1932 qemu_log("\n");
1934 #endif
/*
 * Restore CPU state when unwinding to the middle of a TB.
 * data[] holds the per-insn values recorded by tcg_gen_insn_start()
 * in gen_intermediate_code: data[0] = pc, data[1] = flags.
 */
1937 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
1938 target_ulong *data)
1940 env->pc = data[0];
1941 env->flags = data[1];