1 /*
2 * SH4 translation
4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #define DEBUG_DISAS
22 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
26 #include "tcg-op.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "trace-tcg.h"
32 #include "exec/log.h"
35 typedef struct DisasContext {
36 DisasContextBase base;
38 uint32_t tbflags; /* should stay unmodified during the TB translation */
39 uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
40 int memidx;
41 int gbank;
42 int fbank;
43 uint32_t delayed_pc;
44 uint32_t features;
46 uint16_t opcode;
48 bool has_movcal;
49 } DisasContext;
51 #if defined(CONFIG_USER_ONLY)
52 #define IS_USER(ctx) 1
53 #else
54 #define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
55 #endif
57 /* Target-specific values for ctx->base.is_jmp. */
58 /* We want to exit back to the cpu loop for some reason.
59 Usually this is to recognize interrupts immediately. */
60 #define DISAS_STOP DISAS_TARGET_0
62 /* global register indexes */
63 static TCGv cpu_gregs[32];
64 static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
65 static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
66 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
67 static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
68 static TCGv cpu_lock_addr, cpu_lock_value;
69 static TCGv cpu_fregs[32];
71 /* internal register indexes */
72 static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
74 #include "exec/gen-icount.h"
76 void sh4_translate_init(void)
78 int i;
79 static const char * const gregnames[24] = {
80 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
81 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
82 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
83 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
84 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
86 static const char * const fregnames[32] = {
87 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
88 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
89 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
90 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
91 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
92 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
93 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
94 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
97 for (i = 0; i < 24; i++) {
98 cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
99 offsetof(CPUSH4State, gregs[i]),
100 gregnames[i]);
102 memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
104 cpu_pc = tcg_global_mem_new_i32(cpu_env,
105 offsetof(CPUSH4State, pc), "PC");
106 cpu_sr = tcg_global_mem_new_i32(cpu_env,
107 offsetof(CPUSH4State, sr), "SR");
108 cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
109 offsetof(CPUSH4State, sr_m), "SR_M");
110 cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
111 offsetof(CPUSH4State, sr_q), "SR_Q");
112 cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
113 offsetof(CPUSH4State, sr_t), "SR_T");
114 cpu_ssr = tcg_global_mem_new_i32(cpu_env,
115 offsetof(CPUSH4State, ssr), "SSR");
116 cpu_spc = tcg_global_mem_new_i32(cpu_env,
117 offsetof(CPUSH4State, spc), "SPC");
118 cpu_gbr = tcg_global_mem_new_i32(cpu_env,
119 offsetof(CPUSH4State, gbr), "GBR");
120 cpu_vbr = tcg_global_mem_new_i32(cpu_env,
121 offsetof(CPUSH4State, vbr), "VBR");
122 cpu_sgr = tcg_global_mem_new_i32(cpu_env,
123 offsetof(CPUSH4State, sgr), "SGR");
124 cpu_dbr = tcg_global_mem_new_i32(cpu_env,
125 offsetof(CPUSH4State, dbr), "DBR");
126 cpu_mach = tcg_global_mem_new_i32(cpu_env,
127 offsetof(CPUSH4State, mach), "MACH");
128 cpu_macl = tcg_global_mem_new_i32(cpu_env,
129 offsetof(CPUSH4State, macl), "MACL");
130 cpu_pr = tcg_global_mem_new_i32(cpu_env,
131 offsetof(CPUSH4State, pr), "PR");
132 cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
133 offsetof(CPUSH4State, fpscr), "FPSCR");
134 cpu_fpul = tcg_global_mem_new_i32(cpu_env,
135 offsetof(CPUSH4State, fpul), "FPUL");
137 cpu_flags = tcg_global_mem_new_i32(cpu_env,
138 offsetof(CPUSH4State, flags), "_flags_");
139 cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
140 offsetof(CPUSH4State, delayed_pc),
141 "_delayed_pc_");
142 cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
143 offsetof(CPUSH4State,
144 delayed_cond),
145 "_delayed_cond_");
146 cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
147 offsetof(CPUSH4State, lock_addr),
148 "_lock_addr_");
149 cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
150 offsetof(CPUSH4State, lock_value),
151 "_lock_value_");
153 for (i = 0; i < 32; i++)
154 cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
155 offsetof(CPUSH4State, fregs[i]),
156 fregnames[i]);
159 void superh_cpu_dump_state(CPUState *cs, FILE *f,
160 fprintf_function cpu_fprintf, int flags)
162 SuperHCPU *cpu = SUPERH_CPU(cs);
163 CPUSH4State *env = &cpu->env;
164 int i;
165 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
166 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
167 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
168 env->spc, env->ssr, env->gbr, env->vbr);
169 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
170 env->sgr, env->dbr, env->delayed_pc, env->fpul);
171 for (i = 0; i < 24; i += 4) {
172 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
173 i, env->gregs[i], i + 1, env->gregs[i + 1],
174 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
176 if (env->flags & DELAY_SLOT) {
177 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
178 env->delayed_pc);
179 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
180 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
181 env->delayed_pc);
182 } else if (env->flags & DELAY_SLOT_RTE) {
183 cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
184 env->delayed_pc);
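/* Note: SR is kept split across TCG globals: the Q, M and T bits live in
   cpu_sr_q, cpu_sr_m and cpu_sr_t, while cpu_sr holds all of the other
   bits (gen_write_sr below masks Q/M/T out of it).  gen_read_sr
   reassembles the architectural value on demand; keeping T in its own
   global makes the very frequent flag updates cheap. */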
188 static void gen_read_sr(TCGv dst)
190 TCGv t0 = tcg_temp_new();
191 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
192 tcg_gen_or_i32(dst, cpu_sr, t0);
193 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
194 tcg_gen_or_i32(dst, dst, t0);
195 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
196 tcg_gen_or_i32(dst, dst, t0);
197 tcg_temp_free_i32(t0);
200 static void gen_write_sr(TCGv src)
202 tcg_gen_andi_i32(cpu_sr, src,
203 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
204 tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
205 tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
206 tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
209 static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
211 if (save_pc) {
212 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
214 if (ctx->delayed_pc != (uint32_t) -1) {
215 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
217 if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
218 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
222 static inline bool use_exit_tb(DisasContext *ctx)
224 return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
227 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
229 /* Use a direct jump if in same page and singlestep not enabled */
230 if (unlikely(ctx->base.singlestep_enabled || use_exit_tb(ctx))) {
231 return false;
233 #ifndef CONFIG_USER_ONLY
234 return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
235 #else
236 return true;
237 #endif
240 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
242 if (use_goto_tb(ctx, dest)) {
243 tcg_gen_goto_tb(n);
244 tcg_gen_movi_i32(cpu_pc, dest);
245 tcg_gen_exit_tb((uintptr_t)ctx->base.tb + n);
246 } else {
247 tcg_gen_movi_i32(cpu_pc, dest);
248 if (ctx->base.singlestep_enabled) {
249 gen_helper_debug(cpu_env);
250 } else if (use_exit_tb(ctx)) {
251 tcg_gen_exit_tb(0);
252 } else {
253 tcg_gen_lookup_and_goto_ptr();
256 ctx->base.is_jmp = DISAS_NORETURN;
259 static void gen_jump(DisasContext * ctx)
261 if (ctx->delayed_pc == -1) {
262 /* Target is not statically known; it necessarily comes from a
263 delayed jump, as immediate jumps are conditional jumps. */
264 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
265 tcg_gen_discard_i32(cpu_delayed_pc);
266 if (ctx->base.singlestep_enabled) {
267 gen_helper_debug(cpu_env);
268 } else if (use_exit_tb(ctx)) {
269 tcg_gen_exit_tb(0);
270 } else {
271 tcg_gen_lookup_and_goto_ptr();
273 ctx->base.is_jmp = DISAS_NORETURN;
274 } else {
275 gen_goto_tb(ctx, 0, ctx->delayed_pc);
279 /* Immediate conditional jump (bt or bf) */
280 static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
281 bool jump_if_true)
283 TCGLabel *l1 = gen_new_label();
284 TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;
286 if (ctx->tbflags & GUSA_EXCLUSIVE) {
287 /* When in an exclusive region, we must continue to the end.
288 Therefore, exit the region on a taken branch, but otherwise
289 fall through to the next instruction. */
290 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
291 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
292 /* Note that this won't actually use a goto_tb opcode because we
293 disallow it in use_goto_tb, but it handles exit + singlestep. */
294 gen_goto_tb(ctx, 0, dest);
295 gen_set_label(l1);
296 return;
299 gen_save_cpu_state(ctx, false);
300 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
301 gen_goto_tb(ctx, 0, dest);
302 gen_set_label(l1);
303 gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
304 ctx->base.is_jmp = DISAS_NORETURN;
307 /* Delayed conditional jump (bt or bf) */
308 static void gen_delayed_conditional_jump(DisasContext * ctx)
310 TCGLabel *l1 = gen_new_label();
311 TCGv ds = tcg_temp_new();
313 tcg_gen_mov_i32(ds, cpu_delayed_cond);
314 tcg_gen_discard_i32(cpu_delayed_cond);
316 if (ctx->tbflags & GUSA_EXCLUSIVE) {
317 /* When in an exclusive region, we must continue to the end.
318 Therefore, exit the region on a taken branch, but otherwise
319 fall through to the next instruction. */
320 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);
322 /* Leave the gUSA region. */
323 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
324 gen_jump(ctx);
326 gen_set_label(l1);
327 ctx->base.is_jmp = DISAS_NEXT;
328 return;
331 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
332 gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
333 gen_set_label(l1);
334 gen_jump(ctx);
337 static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
339 /* We have already signaled illegal instruction for odd Dr. */
340 tcg_debug_assert((reg & 1) == 0);
341 reg ^= ctx->fbank;
342 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
345 static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
347 /* We have already signaled illegal instruction for odd Dr. */
348 tcg_debug_assert((reg & 1) == 0);
349 reg ^= ctx->fbank;
350 tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
353 #define B3_0 (ctx->opcode & 0xf)
354 #define B6_4 ((ctx->opcode >> 4) & 0x7)
355 #define B7_4 ((ctx->opcode >> 4) & 0xf)
356 #define B7_0 (ctx->opcode & 0xff)
357 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
358 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
359 (ctx->opcode & 0xfff))
360 #define B11_8 ((ctx->opcode >> 8) & 0xf)
361 #define B15_12 ((ctx->opcode >> 12) & 0xf)
363 #define REG(x) cpu_gregs[(x) ^ ctx->gbank]
364 #define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
365 #define FREG(x) cpu_fregs[(x) ^ ctx->fbank]
367 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
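/* XHACK maps the register field of a double/pair fmov onto the flat
   32-entry cpu_fregs index: when FPSCR.SZ moves register pairs, an odd
   field value designates XDn (the opposite bank), so the low bit is
   relocated to bit 4, the bank-select bit that FREG() xors with fbank. */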
369 #define CHECK_NOT_DELAY_SLOT \
370 if (ctx->envflags & DELAY_SLOT_MASK) { \
371 goto do_illegal_slot; \
374 #define CHECK_PRIVILEGED \
375 if (IS_USER(ctx)) { \
376 goto do_illegal; \
379 #define CHECK_FPU_ENABLED \
380 if (ctx->tbflags & (1u << SR_FD)) { \
381 goto do_fpu_disabled; \
384 #define CHECK_FPSCR_PR_0 \
385 if (ctx->tbflags & FPSCR_PR) { \
386 goto do_illegal; \
389 #define CHECK_FPSCR_PR_1 \
390 if (!(ctx->tbflags & FPSCR_PR)) { \
391 goto do_illegal; \
394 #define CHECK_SH4A \
395 if (!(ctx->features & SH_FEATURE_SH4A)) { \
396 goto do_illegal; \
399 static void _decode_opc(DisasContext * ctx)
401 /* This code tries to make movcal emulation sufficiently
402 accurate for Linux purposes. This instruction writes
403 memory, and prior to that, always allocates a cache line.
404 It is used in two contexts:
405 - in memcpy, where data is copied in blocks, the first write
406 to a block uses movca.l for performance.
407 - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is used
408 to flush the cache. Here, the data written by movca.l is never
409 meant to reach memory, and the data written is just bogus.
411 To simulate this, when emulating movca.l we store the value to memory,
412 but we also remember the previous content. If we see ocbi, we check
413 whether movca.l for that address was done previously. If so, the write should
414 not have hit memory, so we restore the previous content.
415 When we see an instruction that is neither movca.l
416 nor ocbi, the previous content is discarded.
418 To optimize, we only try to flush stores when we're at the start of
419 TB, or if we already saw movca.l in this TB and did not flush stores
420 yet. */
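/* Illustrative guest code (a rough sketch, not taken from real kernels):

     memcpy-style block copy:           cache flush in cache-sh4.c:
       movca.l r0,@r4   ! first store     movca.l r0,@r4   ! bogus data
       mov.l   r1,@(4,r4)                 ocbi    @r4      ! line must not
       ...                                                 ! reach memory

   The backup/restore described above is implemented by
   gen_helper_movcal() and gen_helper_ocbi(). */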
421 if (ctx->has_movcal)
423 int opcode = ctx->opcode & 0xf0ff;
424 if (opcode != 0x0093 /* ocbi */
425 && opcode != 0x00c3 /* movca.l */)
427 gen_helper_discard_movcal_backup(cpu_env);
428 ctx->has_movcal = 0;
432 #if 0
433 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
434 #endif
436 switch (ctx->opcode) {
437 case 0x0019: /* div0u */
438 tcg_gen_movi_i32(cpu_sr_m, 0);
439 tcg_gen_movi_i32(cpu_sr_q, 0);
440 tcg_gen_movi_i32(cpu_sr_t, 0);
441 return;
442 case 0x000b: /* rts */
443 CHECK_NOT_DELAY_SLOT
444 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
445 ctx->envflags |= DELAY_SLOT;
446 ctx->delayed_pc = (uint32_t) - 1;
447 return;
448 case 0x0028: /* clrmac */
449 tcg_gen_movi_i32(cpu_mach, 0);
450 tcg_gen_movi_i32(cpu_macl, 0);
451 return;
452 case 0x0048: /* clrs */
453 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
454 return;
455 case 0x0008: /* clrt */
456 tcg_gen_movi_i32(cpu_sr_t, 0);
457 return;
458 case 0x0038: /* ldtlb */
459 CHECK_PRIVILEGED
460 gen_helper_ldtlb(cpu_env);
461 return;
462 case 0x002b: /* rte */
463 CHECK_PRIVILEGED
464 CHECK_NOT_DELAY_SLOT
465 gen_write_sr(cpu_ssr);
466 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
467 ctx->envflags |= DELAY_SLOT_RTE;
468 ctx->delayed_pc = (uint32_t) - 1;
469 ctx->base.is_jmp = DISAS_STOP;
470 return;
471 case 0x0058: /* sets */
472 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
473 return;
474 case 0x0018: /* sett */
475 tcg_gen_movi_i32(cpu_sr_t, 1);
476 return;
477 case 0xfbfd: /* frchg */
478 CHECK_FPSCR_PR_0
479 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
480 ctx->base.is_jmp = DISAS_STOP;
481 return;
482 case 0xf3fd: /* fschg */
483 CHECK_FPSCR_PR_0
484 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
485 ctx->base.is_jmp = DISAS_STOP;
486 return;
487 case 0xf7fd: /* fpchg */
488 CHECK_SH4A
489 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
490 ctx->base.is_jmp = DISAS_STOP;
491 return;
492 case 0x0009: /* nop */
493 return;
494 case 0x001b: /* sleep */
495 CHECK_PRIVILEGED
496 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
497 gen_helper_sleep(cpu_env);
498 return;
501 switch (ctx->opcode & 0xf000) {
502 case 0x1000: /* mov.l Rm,@(disp,Rn) */
504 TCGv addr = tcg_temp_new();
505 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
506 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
507 tcg_temp_free(addr);
509 return;
510 case 0x5000: /* mov.l @(disp,Rm),Rn */
512 TCGv addr = tcg_temp_new();
513 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
514 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
515 tcg_temp_free(addr);
517 return;
518 case 0xe000: /* mov #imm,Rn */
519 #ifdef CONFIG_USER_ONLY
520 /* Detect the start of a gUSA region. If so, update envflags
521 and end the TB. This will allow us to see the end of the
522 region (stored in R0) in the next TB. */
523 if (B11_8 == 15 && B7_0s < 0 &&
524 (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
525 ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
526 ctx->base.is_jmp = DISAS_STOP;
528 #endif
529 tcg_gen_movi_i32(REG(B11_8), B7_0s);
530 return;
531 case 0x9000: /* mov.w @(disp,PC),Rn */
533 TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
534 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
535 tcg_temp_free(addr);
537 return;
538 case 0xd000: /* mov.l @(disp,PC),Rn */
540 TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
541 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
542 tcg_temp_free(addr);
544 return;
545 case 0x7000: /* add #imm,Rn */
546 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
547 return;
548 case 0xa000: /* bra disp */
549 CHECK_NOT_DELAY_SLOT
550 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
551 ctx->envflags |= DELAY_SLOT;
552 return;
553 case 0xb000: /* bsr disp */
554 CHECK_NOT_DELAY_SLOT
555 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
556 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
557 ctx->envflags |= DELAY_SLOT;
558 return;
561 switch (ctx->opcode & 0xf00f) {
562 case 0x6003: /* mov Rm,Rn */
563 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
564 return;
565 case 0x2000: /* mov.b Rm,@Rn */
566 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
567 return;
568 case 0x2001: /* mov.w Rm,@Rn */
569 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
570 return;
571 case 0x2002: /* mov.l Rm,@Rn */
572 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
573 return;
574 case 0x6000: /* mov.b @Rm,Rn */
575 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
576 return;
577 case 0x6001: /* mov.w @Rm,Rn */
578 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
579 return;
580 case 0x6002: /* mov.l @Rm,Rn */
581 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
582 return;
583 case 0x2004: /* mov.b Rm,@-Rn */
585 TCGv addr = tcg_temp_new();
586 tcg_gen_subi_i32(addr, REG(B11_8), 1);
587 /* might cause re-execution */
588 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
589 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
590 tcg_temp_free(addr);
592 return;
593 case 0x2005: /* mov.w Rm,@-Rn */
595 TCGv addr = tcg_temp_new();
596 tcg_gen_subi_i32(addr, REG(B11_8), 2);
597 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
598 tcg_gen_mov_i32(REG(B11_8), addr);
599 tcg_temp_free(addr);
601 return;
602 case 0x2006: /* mov.l Rm,@-Rn */
604 TCGv addr = tcg_temp_new();
605 tcg_gen_subi_i32(addr, REG(B11_8), 4);
606 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
607 tcg_gen_mov_i32(REG(B11_8), addr);
608 tcg_temp_free(addr);
610 return;
611 case 0x6004: /* mov.b @Rm+,Rn */
612 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
613 if ( B11_8 != B7_4 )
614 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
615 return;
616 case 0x6005: /* mov.w @Rm+,Rn */
617 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
618 if ( B11_8 != B7_4 )
619 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
620 return;
621 case 0x6006: /* mov.l @Rm+,Rn */
622 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
623 if ( B11_8 != B7_4 )
624 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
625 return;
626 case 0x0004: /* mov.b Rm,@(R0,Rn) */
628 TCGv addr = tcg_temp_new();
629 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
630 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
631 tcg_temp_free(addr);
633 return;
634 case 0x0005: /* mov.w Rm,@(R0,Rn) */
636 TCGv addr = tcg_temp_new();
637 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
638 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
639 tcg_temp_free(addr);
641 return;
642 case 0x0006: /* mov.l Rm,@(R0,Rn) */
644 TCGv addr = tcg_temp_new();
645 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
646 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
647 tcg_temp_free(addr);
649 return;
650 case 0x000c: /* mov.b @(R0,Rm),Rn */
652 TCGv addr = tcg_temp_new();
653 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
654 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
655 tcg_temp_free(addr);
657 return;
658 case 0x000d: /* mov.w @(R0,Rm),Rn */
660 TCGv addr = tcg_temp_new();
661 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
662 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
663 tcg_temp_free(addr);
665 return;
666 case 0x000e: /* mov.l @(R0,Rm),Rn */
668 TCGv addr = tcg_temp_new();
669 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
670 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
671 tcg_temp_free(addr);
673 return;
674 case 0x6008: /* swap.b Rm,Rn */
676 TCGv low = tcg_temp_new();
677 tcg_gen_ext16u_i32(low, REG(B7_4));
678 tcg_gen_bswap16_i32(low, low);
679 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
680 tcg_temp_free(low);
682 return;
683 case 0x6009: /* swap.w Rm,Rn */
684 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
685 return;
686 case 0x200d: /* xtrct Rm,Rn */
688 TCGv high, low;
689 high = tcg_temp_new();
690 tcg_gen_shli_i32(high, REG(B7_4), 16);
691 low = tcg_temp_new();
692 tcg_gen_shri_i32(low, REG(B11_8), 16);
693 tcg_gen_or_i32(REG(B11_8), high, low);
694 tcg_temp_free(low);
695 tcg_temp_free(high);
697 return;
698 case 0x300c: /* add Rm,Rn */
699 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
700 return;
701 case 0x300e: /* addc Rm,Rn */
703 TCGv t0, t1;
704 t0 = tcg_const_tl(0);
705 t1 = tcg_temp_new();
706 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
707 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
708 REG(B11_8), t0, t1, cpu_sr_t);
709 tcg_temp_free(t0);
710 tcg_temp_free(t1);
712 return;
713 case 0x300f: /* addv Rm,Rn */
715 TCGv t0, t1, t2;
716 t0 = tcg_temp_new();
717 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
718 t1 = tcg_temp_new();
719 tcg_gen_xor_i32(t1, t0, REG(B11_8));
720 t2 = tcg_temp_new();
721 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
722 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
723 tcg_temp_free(t2);
724 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
725 tcg_temp_free(t1);
726 tcg_gen_mov_i32(REG(B11_8), t0); /* addv writes its result to Rn */
727 tcg_temp_free(t0);
729 return;
730 case 0x2009: /* and Rm,Rn */
731 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
732 return;
733 case 0x3000: /* cmp/eq Rm,Rn */
734 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
735 return;
736 case 0x3003: /* cmp/ge Rm,Rn */
737 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
738 return;
739 case 0x3007: /* cmp/gt Rm,Rn */
740 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
741 return;
742 case 0x3006: /* cmp/hi Rm,Rn */
743 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
744 return;
745 case 0x3002: /* cmp/hs Rm,Rn */
746 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
747 return;
748 case 0x200c: /* cmp/str Rm,Rn */
750 TCGv cmp1 = tcg_temp_new();
751 TCGv cmp2 = tcg_temp_new();
752 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
753 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
754 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
755 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
756 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
757 tcg_temp_free(cmp2);
758 tcg_temp_free(cmp1);
760 return;
761 case 0x2007: /* div0s Rm,Rn */
762 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
763 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
764 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
765 return;
766 case 0x3004: /* div1 Rm,Rn */
768 TCGv t0 = tcg_temp_new();
769 TCGv t1 = tcg_temp_new();
770 TCGv t2 = tcg_temp_new();
771 TCGv zero = tcg_const_i32(0);
773 /* shift left arg1, saving the bit being pushed out and inserting
774 T on the right */
775 tcg_gen_shri_i32(t0, REG(B11_8), 31);
776 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
777 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
779 /* Add or subtract arg0 from arg1 depending on whether Q == M. To avoid
780 using 64-bit temps, we compute arg0's high part from q ^ m, so
781 that it is 0x00000000 when adding the value or 0xffffffff when
782 subtracting it. */
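/* Concretely: t1 = (q ^ m) - 1 is 0 when Q != M, selecting +Rm with a
   zero high part, and 0xffffffff when Q == M, selecting -Rm with a
   sign-extended high part (i.e. a subtraction).  The carry of the 33-bit
   add2 below ends up in bit 0 of t1. */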
783 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
784 tcg_gen_subi_i32(t1, t1, 1);
785 tcg_gen_neg_i32(t2, REG(B7_4));
786 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
787 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
789 /* compute T and Q depending on carry */
790 tcg_gen_andi_i32(t1, t1, 1);
791 tcg_gen_xor_i32(t1, t1, t0);
792 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
793 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
795 tcg_temp_free(zero);
796 tcg_temp_free(t2);
797 tcg_temp_free(t1);
798 tcg_temp_free(t0);
800 return;
801 case 0x300d: /* dmuls.l Rm,Rn */
802 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
803 return;
804 case 0x3005: /* dmulu.l Rm,Rn */
805 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
806 return;
807 case 0x600e: /* exts.b Rm,Rn */
808 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
809 return;
810 case 0x600f: /* exts.w Rm,Rn */
811 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
812 return;
813 case 0x600c: /* extu.b Rm,Rn */
814 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
815 return;
816 case 0x600d: /* extu.w Rm,Rn */
817 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
818 return;
819 case 0x000f: /* mac.l @Rm+,@Rn+ */
821 TCGv arg0, arg1;
822 arg0 = tcg_temp_new();
823 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
824 arg1 = tcg_temp_new();
825 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
826 gen_helper_macl(cpu_env, arg0, arg1);
827 tcg_temp_free(arg1);
828 tcg_temp_free(arg0);
829 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
830 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
832 return;
833 case 0x400f: /* mac.w @Rm+,@Rn+ */
835 TCGv arg0, arg1;
836 arg0 = tcg_temp_new();
837 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
838 arg1 = tcg_temp_new();
839 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
840 gen_helper_macw(cpu_env, arg0, arg1);
841 tcg_temp_free(arg1);
842 tcg_temp_free(arg0);
843 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
844 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
846 return;
847 case 0x0007: /* mul.l Rm,Rn */
848 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
849 return;
850 case 0x200f: /* muls.w Rm,Rn */
852 TCGv arg0, arg1;
853 arg0 = tcg_temp_new();
854 tcg_gen_ext16s_i32(arg0, REG(B7_4));
855 arg1 = tcg_temp_new();
856 tcg_gen_ext16s_i32(arg1, REG(B11_8));
857 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
858 tcg_temp_free(arg1);
859 tcg_temp_free(arg0);
861 return;
862 case 0x200e: /* mulu.w Rm,Rn */
864 TCGv arg0, arg1;
865 arg0 = tcg_temp_new();
866 tcg_gen_ext16u_i32(arg0, REG(B7_4));
867 arg1 = tcg_temp_new();
868 tcg_gen_ext16u_i32(arg1, REG(B11_8));
869 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
870 tcg_temp_free(arg1);
871 tcg_temp_free(arg0);
873 return;
874 case 0x600b: /* neg Rm,Rn */
875 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
876 return;
877 case 0x600a: /* negc Rm,Rn */
879 TCGv t0 = tcg_const_i32(0);
880 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
881 REG(B7_4), t0, cpu_sr_t, t0);
882 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
883 t0, t0, REG(B11_8), cpu_sr_t);
884 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
885 tcg_temp_free(t0);
887 return;
888 case 0x6007: /* not Rm,Rn */
889 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
890 return;
891 case 0x200b: /* or Rm,Rn */
892 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
893 return;
894 case 0x400c: /* shad Rm,Rn */
896 TCGv t0 = tcg_temp_new();
897 TCGv t1 = tcg_temp_new();
898 TCGv t2 = tcg_temp_new();
900 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
902 /* positive case: shift to the left */
903 tcg_gen_shl_i32(t1, REG(B11_8), t0);
905 /* negative case: shift to the right in two steps to
906 correctly handle the -32 case */
907 tcg_gen_xori_i32(t0, t0, 0x1f);
908 tcg_gen_sar_i32(t2, REG(B11_8), t0);
909 tcg_gen_sari_i32(t2, t2, 1);
911 /* select between the two cases */
912 tcg_gen_movi_i32(t0, 0);
913 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
915 tcg_temp_free(t0);
916 tcg_temp_free(t1);
917 tcg_temp_free(t2);
919 return;
920 case 0x400d: /* shld Rm,Rn */
922 TCGv t0 = tcg_temp_new();
923 TCGv t1 = tcg_temp_new();
924 TCGv t2 = tcg_temp_new();
926 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
928 /* positive case: shift to the left */
929 tcg_gen_shl_i32(t1, REG(B11_8), t0);
931 /* negative case: shift to the right in two steps to
932 correctly handle the -32 case */
933 tcg_gen_xori_i32(t0, t0, 0x1f);
934 tcg_gen_shr_i32(t2, REG(B11_8), t0);
935 tcg_gen_shri_i32(t2, t2, 1);
937 /* select between the two cases */
938 tcg_gen_movi_i32(t0, 0);
939 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
941 tcg_temp_free(t0);
942 tcg_temp_free(t1);
943 tcg_temp_free(t2);
945 return;
946 case 0x3008: /* sub Rm,Rn */
947 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
948 return;
949 case 0x300a: /* subc Rm,Rn */
951 TCGv t0, t1;
952 t0 = tcg_const_tl(0);
953 t1 = tcg_temp_new();
954 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
955 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
956 REG(B11_8), t0, t1, cpu_sr_t);
957 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
958 tcg_temp_free(t0);
959 tcg_temp_free(t1);
961 return;
962 case 0x300b: /* subv Rm,Rn */
964 TCGv t0, t1, t2;
965 t0 = tcg_temp_new();
966 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
967 t1 = tcg_temp_new();
968 tcg_gen_xor_i32(t1, t0, REG(B7_4));
969 t2 = tcg_temp_new();
970 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
971 tcg_gen_and_i32(t1, t1, t2);
972 tcg_temp_free(t2);
973 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
974 tcg_temp_free(t1);
975 tcg_gen_mov_i32(REG(B11_8), t0);
976 tcg_temp_free(t0);
978 return;
979 case 0x2008: /* tst Rm,Rn */
981 TCGv val = tcg_temp_new();
982 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
983 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
984 tcg_temp_free(val);
986 return;
987 case 0x200a: /* xor Rm,Rn */
988 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
989 return;
990 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
991 CHECK_FPU_ENABLED
992 if (ctx->tbflags & FPSCR_SZ) {
993 int xsrc = XHACK(B7_4);
994 int xdst = XHACK(B11_8);
995 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
996 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
997 } else {
998 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
1000 return;
1001 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1002 CHECK_FPU_ENABLED
1003 if (ctx->tbflags & FPSCR_SZ) {
1004 TCGv_i64 fp = tcg_temp_new_i64();
1005 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1006 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
1007 tcg_temp_free_i64(fp);
1008 } else {
1009 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
1011 return;
1012 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1013 CHECK_FPU_ENABLED
1014 if (ctx->tbflags & FPSCR_SZ) {
1015 TCGv_i64 fp = tcg_temp_new_i64();
1016 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1017 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1018 tcg_temp_free_i64(fp);
1019 } else {
1020 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
1022 return;
1023 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1024 CHECK_FPU_ENABLED
1025 if (ctx->tbflags & FPSCR_SZ) {
1026 TCGv_i64 fp = tcg_temp_new_i64();
1027 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1028 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1029 tcg_temp_free_i64(fp);
1030 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1031 } else {
1032 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
1033 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1035 return;
1036 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1037 CHECK_FPU_ENABLED
1039 TCGv addr = tcg_temp_new_i32();
1040 if (ctx->tbflags & FPSCR_SZ) {
1041 TCGv_i64 fp = tcg_temp_new_i64();
1042 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1043 tcg_gen_subi_i32(addr, REG(B11_8), 8);
1044 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1045 tcg_temp_free_i64(fp);
1046 } else {
1047 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1048 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1050 tcg_gen_mov_i32(REG(B11_8), addr);
1051 tcg_temp_free(addr);
1053 return;
1054 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
1055 CHECK_FPU_ENABLED
1057 TCGv addr = tcg_temp_new_i32();
1058 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1059 if (ctx->tbflags & FPSCR_SZ) {
1060 TCGv_i64 fp = tcg_temp_new_i64();
1061 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
1062 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1063 tcg_temp_free_i64(fp);
1064 } else {
1065 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
1067 tcg_temp_free(addr);
1069 return;
1070 case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
1071 CHECK_FPU_ENABLED
1073 TCGv addr = tcg_temp_new();
1074 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1075 if (ctx->tbflags & FPSCR_SZ) {
1076 TCGv_i64 fp = tcg_temp_new_i64();
1077 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1078 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1079 tcg_temp_free_i64(fp);
1080 } else {
1081 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1083 tcg_temp_free(addr);
1085 return;
1086 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1087 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1088 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1089 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1090 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1091 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1093 CHECK_FPU_ENABLED
1094 if (ctx->tbflags & FPSCR_PR) {
1095 TCGv_i64 fp0, fp1;
1097 if (ctx->opcode & 0x0110) {
1098 goto do_illegal;
1100 fp0 = tcg_temp_new_i64();
1101 fp1 = tcg_temp_new_i64();
1102 gen_load_fpr64(ctx, fp0, B11_8);
1103 gen_load_fpr64(ctx, fp1, B7_4);
1104 switch (ctx->opcode & 0xf00f) {
1105 case 0xf000: /* fadd Rm,Rn */
1106 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1107 break;
1108 case 0xf001: /* fsub Rm,Rn */
1109 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1110 break;
1111 case 0xf002: /* fmul Rm,Rn */
1112 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1113 break;
1114 case 0xf003: /* fdiv Rm,Rn */
1115 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1116 break;
1117 case 0xf004: /* fcmp/eq Rm,Rn */
1118 gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
1119 return;
1120 case 0xf005: /* fcmp/gt Rm,Rn */
1121 gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
1122 return;
1124 gen_store_fpr64(ctx, fp0, B11_8);
1125 tcg_temp_free_i64(fp0);
1126 tcg_temp_free_i64(fp1);
1127 } else {
1128 switch (ctx->opcode & 0xf00f) {
1129 case 0xf000: /* fadd Rm,Rn */
1130 gen_helper_fadd_FT(FREG(B11_8), cpu_env,
1131 FREG(B11_8), FREG(B7_4));
1132 break;
1133 case 0xf001: /* fsub Rm,Rn */
1134 gen_helper_fsub_FT(FREG(B11_8), cpu_env,
1135 FREG(B11_8), FREG(B7_4));
1136 break;
1137 case 0xf002: /* fmul Rm,Rn */
1138 gen_helper_fmul_FT(FREG(B11_8), cpu_env,
1139 FREG(B11_8), FREG(B7_4));
1140 break;
1141 case 0xf003: /* fdiv Rm,Rn */
1142 gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
1143 FREG(B11_8), FREG(B7_4));
1144 break;
1145 case 0xf004: /* fcmp/eq Rm,Rn */
1146 gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
1147 FREG(B11_8), FREG(B7_4));
1148 return;
1149 case 0xf005: /* fcmp/gt Rm,Rn */
1150 gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
1151 FREG(B11_8), FREG(B7_4));
1152 return;
1156 return;
1157 case 0xf00e: /* fmac FR0,FRm,FRn */
1158 CHECK_FPU_ENABLED
1159 CHECK_FPSCR_PR_0
1160 gen_helper_fmac_FT(FREG(B11_8), cpu_env,
1161 FREG(0), FREG(B7_4), FREG(B11_8));
1162 return;
1165 switch (ctx->opcode & 0xff00) {
1166 case 0xc900: /* and #imm,R0 */
1167 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1168 return;
1169 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1171 TCGv addr, val;
1172 addr = tcg_temp_new();
1173 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1174 val = tcg_temp_new();
1175 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1176 tcg_gen_andi_i32(val, val, B7_0);
1177 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1178 tcg_temp_free(val);
1179 tcg_temp_free(addr);
1181 return;
1182 case 0x8b00: /* bf label */
1183 CHECK_NOT_DELAY_SLOT
1184 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
1185 return;
1186 case 0x8f00: /* bf/s label */
1187 CHECK_NOT_DELAY_SLOT
1188 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
1189 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1190 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1191 return;
1192 case 0x8900: /* bt label */
1193 CHECK_NOT_DELAY_SLOT
1194 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
1195 return;
1196 case 0x8d00: /* bt/s label */
1197 CHECK_NOT_DELAY_SLOT
1198 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
1199 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1200 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1201 return;
1202 case 0x8800: /* cmp/eq #imm,R0 */
1203 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1204 return;
1205 case 0xc400: /* mov.b @(disp,GBR),R0 */
1207 TCGv addr = tcg_temp_new();
1208 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1209 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1210 tcg_temp_free(addr);
1212 return;
1213 case 0xc500: /* mov.w @(disp,GBR),R0 */
1215 TCGv addr = tcg_temp_new();
1216 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1217 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1218 tcg_temp_free(addr);
1220 return;
1221 case 0xc600: /* mov.l @(disp,GBR),R0 */
1223 TCGv addr = tcg_temp_new();
1224 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1225 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1226 tcg_temp_free(addr);
1228 return;
1229 case 0xc000: /* mov.b R0,@(disp,GBR) */
1231 TCGv addr = tcg_temp_new();
1232 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1233 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1234 tcg_temp_free(addr);
1236 return;
1237 case 0xc100: /* mov.w R0,@(disp,GBR) */
1239 TCGv addr = tcg_temp_new();
1240 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1241 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1242 tcg_temp_free(addr);
1244 return;
1245 case 0xc200: /* mov.l R0,@(disp,GBR) */
1247 TCGv addr = tcg_temp_new();
1248 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1249 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1250 tcg_temp_free(addr);
1252 return;
1253 case 0x8000: /* mov.b R0,@(disp,Rn) */
1255 TCGv addr = tcg_temp_new();
1256 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1257 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1258 tcg_temp_free(addr);
1260 return;
1261 case 0x8100: /* mov.w R0,@(disp,Rn) */
1263 TCGv addr = tcg_temp_new();
1264 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1265 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1266 tcg_temp_free(addr);
1268 return;
1269 case 0x8400: /* mov.b @(disp,Rn),R0 */
1271 TCGv addr = tcg_temp_new();
1272 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1273 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1274 tcg_temp_free(addr);
1276 return;
1277 case 0x8500: /* mov.w @(disp,Rn),R0 */
1279 TCGv addr = tcg_temp_new();
1280 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1281 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1282 tcg_temp_free(addr);
1284 return;
1285 case 0xc700: /* mova @(disp,PC),R0 */
1286 tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1287 4 + B7_0 * 4) & ~3);
1288 return;
1289 case 0xcb00: /* or #imm,R0 */
1290 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1291 return;
1292 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1294 TCGv addr, val;
1295 addr = tcg_temp_new();
1296 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1297 val = tcg_temp_new();
1298 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1299 tcg_gen_ori_i32(val, val, B7_0);
1300 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1301 tcg_temp_free(val);
1302 tcg_temp_free(addr);
1304 return;
1305 case 0xc300: /* trapa #imm */
1307 TCGv imm;
1308 CHECK_NOT_DELAY_SLOT
1309 gen_save_cpu_state(ctx, true);
1310 imm = tcg_const_i32(B7_0);
1311 gen_helper_trapa(cpu_env, imm);
1312 tcg_temp_free(imm);
1313 ctx->base.is_jmp = DISAS_NORETURN;
1315 return;
1316 case 0xc800: /* tst #imm,R0 */
1318 TCGv val = tcg_temp_new();
1319 tcg_gen_andi_i32(val, REG(0), B7_0);
1320 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1321 tcg_temp_free(val);
1323 return;
1324 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1326 TCGv val = tcg_temp_new();
1327 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1328 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1329 tcg_gen_andi_i32(val, val, B7_0);
1330 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1331 tcg_temp_free(val);
1333 return;
1334 case 0xca00: /* xor #imm,R0 */
1335 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1336 return;
1337 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1339 TCGv addr, val;
1340 addr = tcg_temp_new();
1341 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1342 val = tcg_temp_new();
1343 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1344 tcg_gen_xori_i32(val, val, B7_0);
1345 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1346 tcg_temp_free(val);
1347 tcg_temp_free(addr);
1349 return;
1352 switch (ctx->opcode & 0xf08f) {
1353 case 0x408e: /* ldc Rm,Rn_BANK */
1354 CHECK_PRIVILEGED
1355 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1356 return;
1357 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1358 CHECK_PRIVILEGED
1359 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1360 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1361 return;
1362 case 0x0082: /* stc Rm_BANK,Rn */
1363 CHECK_PRIVILEGED
1364 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1365 return;
1366 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1367 CHECK_PRIVILEGED
1369 TCGv addr = tcg_temp_new();
1370 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1371 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1372 tcg_gen_mov_i32(REG(B11_8), addr);
1373 tcg_temp_free(addr);
1375 return;
1378 switch (ctx->opcode & 0xf0ff) {
1379 case 0x0023: /* braf Rn */
1380 CHECK_NOT_DELAY_SLOT
1381 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
1382 ctx->envflags |= DELAY_SLOT;
1383 ctx->delayed_pc = (uint32_t) - 1;
1384 return;
1385 case 0x0003: /* bsrf Rn */
1386 CHECK_NOT_DELAY_SLOT
1387 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1388 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1389 ctx->envflags |= DELAY_SLOT;
1390 ctx->delayed_pc = (uint32_t) - 1;
1391 return;
1392 case 0x4015: /* cmp/pl Rn */
1393 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1394 return;
1395 case 0x4011: /* cmp/pz Rn */
1396 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1397 return;
1398 case 0x4010: /* dt Rn */
1399 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1400 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1401 return;
1402 case 0x402b: /* jmp @Rn */
1403 CHECK_NOT_DELAY_SLOT
1404 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1405 ctx->envflags |= DELAY_SLOT;
1406 ctx->delayed_pc = (uint32_t) - 1;
1407 return;
1408 case 0x400b: /* jsr @Rn */
1409 CHECK_NOT_DELAY_SLOT
1410 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1411 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1412 ctx->envflags |= DELAY_SLOT;
1413 ctx->delayed_pc = (uint32_t) - 1;
1414 return;
1415 case 0x400e: /* ldc Rm,SR */
1416 CHECK_PRIVILEGED
1418 TCGv val = tcg_temp_new();
1419 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1420 gen_write_sr(val);
1421 tcg_temp_free(val);
1422 ctx->base.is_jmp = DISAS_STOP;
1424 return;
1425 case 0x4007: /* ldc.l @Rm+,SR */
1426 CHECK_PRIVILEGED
1428 TCGv val = tcg_temp_new();
1429 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1430 tcg_gen_andi_i32(val, val, 0x700083f3);
1431 gen_write_sr(val);
1432 tcg_temp_free(val);
1433 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1434 ctx->base.is_jmp = DISAS_STOP;
1436 return;
1437 case 0x0002: /* stc SR,Rn */
1438 CHECK_PRIVILEGED
1439 gen_read_sr(REG(B11_8));
1440 return;
1441 case 0x4003: /* stc SR,@-Rn */
1442 CHECK_PRIVILEGED
1444 TCGv addr = tcg_temp_new();
1445 TCGv val = tcg_temp_new();
1446 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1447 gen_read_sr(val);
1448 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1449 tcg_gen_mov_i32(REG(B11_8), addr);
1450 tcg_temp_free(val);
1451 tcg_temp_free(addr);
1453 return;
1454 #define LD(reg,ldnum,ldpnum,prechk) \
1455 case ldnum: \
1456 prechk \
1457 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1458 return; \
1459 case ldpnum: \
1460 prechk \
1461 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1462 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1463 return;
1464 #define ST(reg,stnum,stpnum,prechk) \
1465 case stnum: \
1466 prechk \
1467 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1468 return; \
1469 case stpnum: \
1470 prechk \
1472 TCGv addr = tcg_temp_new(); \
1473 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1474 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1475 tcg_gen_mov_i32(REG(B11_8), addr); \
1476 tcg_temp_free(addr); \
1478 return;
1479 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1480 LD(reg,ldnum,ldpnum,prechk) \
1481 ST(reg,stnum,stpnum,prechk)
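/* For example, LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {}) below expands
   into the four cases for "ldc Rm,GBR", "ldc.l @Rm+,GBR", "stc GBR,Rn" and
   "stc.l GBR,@-Rn", each preceded by the supplied check (empty for GBR). */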
1482 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1483 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1484 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1485 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1486 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1487 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1488 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1489 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1490 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1491 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1492 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1493 case 0x406a: /* lds Rm,FPSCR */
1494 CHECK_FPU_ENABLED
1495 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1496 ctx->base.is_jmp = DISAS_STOP;
1497 return;
1498 case 0x4066: /* lds.l @Rm+,FPSCR */
1499 CHECK_FPU_ENABLED
1501 TCGv addr = tcg_temp_new();
1502 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1503 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1504 gen_helper_ld_fpscr(cpu_env, addr);
1505 tcg_temp_free(addr);
1506 ctx->base.is_jmp = DISAS_STOP;
1508 return;
1509 case 0x006a: /* sts FPSCR,Rn */
1510 CHECK_FPU_ENABLED
1511 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1512 return;
1513 case 0x4062: /* sts FPSCR,@-Rn */
1514 CHECK_FPU_ENABLED
1516 TCGv addr, val;
1517 val = tcg_temp_new();
1518 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1519 addr = tcg_temp_new();
1520 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1521 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1522 tcg_gen_mov_i32(REG(B11_8), addr);
1523 tcg_temp_free(addr);
1524 tcg_temp_free(val);
1526 return;
1527 case 0x00c3: /* movca.l R0,@Rm */
1529 TCGv val = tcg_temp_new();
1530 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1531 gen_helper_movcal(cpu_env, REG(B11_8), val);
1532 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1533 tcg_temp_free(val);
1535 ctx->has_movcal = 1;
1536 return;
1537 case 0x40a9: /* movua.l @Rm,R0 */
1538 CHECK_SH4A
1539 /* Load non-boundary-aligned data */
1540 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1541 MO_TEUL | MO_UNALN);
1542 return;
1543 break;
1544 case 0x40e9: /* movua.l @Rm+,R0 */
1545 CHECK_SH4A
1546 /* Load non-boundary-aligned data */
1547 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1548 MO_TEUL | MO_UNALN);
1549 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1550 return;
1551 break;
1552 case 0x0029: /* movt Rn */
1553 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1554 return;
1555 case 0x0073:
1556 /* MOVCO.L
1557 * LDST -> T
1558 * If (T == 1) R0 -> (Rn)
1559 * 0 -> LDST
1561 * The above description doesn't work in a parallel context.
1562 * Since we currently support no smp boards, this implies user-mode.
1563 * But we can still support the official mechanism while user-mode
1564 * is single-threaded. */
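/* With CF_PARALLEL this pairs with movli.l as load-locked/store-conditional:
 * movli.l records the address in cpu_lock_addr and the loaded value in
 * cpu_lock_value, and movco.l succeeds only if a cmpxchg at that address
 * still observes the recorded value.  Without CF_PARALLEL a plain store is
 * enough, guarded by cpu_lock_addr != -1. */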
1565 CHECK_SH4A
1567 TCGLabel *fail = gen_new_label();
1568 TCGLabel *done = gen_new_label();
1570 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1571 TCGv tmp;
1573 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1574 cpu_lock_addr, fail);
1575 tmp = tcg_temp_new();
1576 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1577 REG(0), ctx->memidx, MO_TEUL);
1578 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1579 tcg_temp_free(tmp);
1580 } else {
1581 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1582 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1583 tcg_gen_movi_i32(cpu_sr_t, 1);
1585 tcg_gen_br(done);
1587 gen_set_label(fail);
1588 tcg_gen_movi_i32(cpu_sr_t, 0);
1590 gen_set_label(done);
1591 tcg_gen_movi_i32(cpu_lock_addr, -1);
1593 return;
1594 case 0x0063:
1595 /* MOVLI.L @Rm,R0
1596 * 1 -> LDST
1597 * (Rm) -> R0
1598 * When interrupt/exception
1599 * occurred 0 -> LDST
1601 * In a parallel context, we must also save the loaded value
1602 * for use with the cmpxchg that we'll use with movco.l. */
1603 CHECK_SH4A
1604 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1605 TCGv tmp = tcg_temp_new();
1606 tcg_gen_mov_i32(tmp, REG(B11_8));
1607 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1608 tcg_gen_mov_i32(cpu_lock_value, REG(0));
1609 tcg_gen_mov_i32(cpu_lock_addr, tmp);
1610 tcg_temp_free(tmp);
1611 } else {
1612 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1613 tcg_gen_movi_i32(cpu_lock_addr, 0);
1615 return;
1616 case 0x0093: /* ocbi @Rn */
1618 gen_helper_ocbi(cpu_env, REG(B11_8));
1620 return;
1621 case 0x00a3: /* ocbp @Rn */
1622 case 0x00b3: /* ocbwb @Rn */
1623 /* These instructions are supposed to do nothing in case of
1624 a cache miss. Given that we only partially emulate caches
1625 it is safe to simply ignore them. */
1626 return;
1627 case 0x0083: /* pref @Rn */
1628 return;
1629 case 0x00d3: /* prefi @Rn */
1630 CHECK_SH4A
1631 return;
1632 case 0x00e3: /* icbi @Rn */
1633 CHECK_SH4A
1634 return;
1635 case 0x00ab: /* synco */
1636 CHECK_SH4A
1637 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1638 return;
1639 break;
1640 case 0x4024: /* rotcl Rn */
1642 TCGv tmp = tcg_temp_new();
1643 tcg_gen_mov_i32(tmp, cpu_sr_t);
1644 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1645 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1646 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1647 tcg_temp_free(tmp);
1649 return;
1650 case 0x4025: /* rotcr Rn */
1652 TCGv tmp = tcg_temp_new();
1653 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1654 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1655 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1656 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1657 tcg_temp_free(tmp);
1659 return;
1660 case 0x4004: /* rotl Rn */
1661 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1662 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1); /* T = bit rotated out */
1663 return;
1664 case 0x4005: /* rotr Rn */
1665 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1); /* T = bit rotated out */
1666 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1667 return;
1668 case 0x4000: /* shll Rn */
1669 case 0x4020: /* shal Rn */
1670 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1671 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1672 return;
1673 case 0x4021: /* shar Rn */
1674 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1675 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1676 return;
1677 case 0x4001: /* shlr Rn */
1678 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1679 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1680 return;
1681 case 0x4008: /* shll2 Rn */
1682 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1683 return;
1684 case 0x4018: /* shll8 Rn */
1685 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1686 return;
1687 case 0x4028: /* shll16 Rn */
1688 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1689 return;
1690 case 0x4009: /* shlr2 Rn */
1691 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1692 return;
1693 case 0x4019: /* shlr8 Rn */
1694 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1695 return;
1696 case 0x4029: /* shlr16 Rn */
1697 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1698 return;
1699 case 0x401b: /* tas.b @Rn */
1701 TCGv val = tcg_const_i32(0x80);
1702 tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
1703 ctx->memidx, MO_UB);
1704 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1705 tcg_temp_free(val);
1707 return;
1708 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1709 CHECK_FPU_ENABLED
1710 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1711 return;
1712 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1713 CHECK_FPU_ENABLED
1714 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1715 return;
1716 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1717 CHECK_FPU_ENABLED
1718 if (ctx->tbflags & FPSCR_PR) {
1719 TCGv_i64 fp;
1720 if (ctx->opcode & 0x0100) {
1721 goto do_illegal;
1723 fp = tcg_temp_new_i64();
1724 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1725 gen_store_fpr64(ctx, fp, B11_8);
1726 tcg_temp_free_i64(fp);
1728 else {
1729 gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
1731 return;
1732 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1733 CHECK_FPU_ENABLED
1734 if (ctx->tbflags & FPSCR_PR) {
1735 TCGv_i64 fp;
1736 if (ctx->opcode & 0x0100) {
1737 goto do_illegal;
1739 fp = tcg_temp_new_i64();
1740 gen_load_fpr64(ctx, fp, B11_8);
1741 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1742 tcg_temp_free_i64(fp);
1744 else {
1745 gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
1747 return;
1748 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1749 CHECK_FPU_ENABLED
1750 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1751 return;
1752 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
1753 CHECK_FPU_ENABLED
1754 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1755 return;
1756 case 0xf06d: /* fsqrt FRn */
1757 CHECK_FPU_ENABLED
1758 if (ctx->tbflags & FPSCR_PR) {
1759 if (ctx->opcode & 0x0100) {
1760 goto do_illegal;
1762 TCGv_i64 fp = tcg_temp_new_i64();
1763 gen_load_fpr64(ctx, fp, B11_8);
1764 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1765 gen_store_fpr64(ctx, fp, B11_8);
1766 tcg_temp_free_i64(fp);
1767 } else {
1768 gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1770 return;
1771 case 0xf07d: /* fsrra FRn */
1772 CHECK_FPU_ENABLED
1773 CHECK_FPSCR_PR_0
1774 gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1775 break;
1776 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1777 CHECK_FPU_ENABLED
1778 CHECK_FPSCR_PR_0
1779 tcg_gen_movi_i32(FREG(B11_8), 0);
1780 return;
1781 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1782 CHECK_FPU_ENABLED
1783 CHECK_FPSCR_PR_0
1784 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1785 return;
1786 case 0xf0ad: /* fcnvsd FPUL,DRn */
1787 CHECK_FPU_ENABLED
1789 TCGv_i64 fp = tcg_temp_new_i64();
1790 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1791 gen_store_fpr64(ctx, fp, B11_8);
1792 tcg_temp_free_i64(fp);
1794 return;
1795 case 0xf0bd: /* fcnvds DRn,FPUL */
1796 CHECK_FPU_ENABLED
1798 TCGv_i64 fp = tcg_temp_new_i64();
1799 gen_load_fpr64(ctx, fp, B11_8);
1800 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1801 tcg_temp_free_i64(fp);
1803 return;
1804 case 0xf0ed: /* fipr FVm,FVn */
1805 CHECK_FPU_ENABLED
1806 CHECK_FPSCR_PR_1
1808 TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
1809 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1810 gen_helper_fipr(cpu_env, m, n);
1811 tcg_temp_free(m);
1812 tcg_temp_free(n);
1813 return;
1815 break;
1816 case 0xf0fd: /* ftrv XMTRX,FVn */
1817 CHECK_FPU_ENABLED
1818 CHECK_FPSCR_PR_1
1820 if ((ctx->opcode & 0x0300) != 0x0100) {
1821 goto do_illegal;
1823 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1824 gen_helper_ftrv(cpu_env, n);
1825 tcg_temp_free(n);
1826 return;
1828 break;
1830 #if 0
1831 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1832 ctx->opcode, ctx->base.pc_next);
1833 fflush(stderr);
1834 #endif
1835 do_illegal:
1836 if (ctx->envflags & DELAY_SLOT_MASK) {
1837 do_illegal_slot:
1838 gen_save_cpu_state(ctx, true);
1839 gen_helper_raise_slot_illegal_instruction(cpu_env);
1840 } else {
1841 gen_save_cpu_state(ctx, true);
1842 gen_helper_raise_illegal_instruction(cpu_env);
1844 ctx->base.is_jmp = DISAS_NORETURN;
1845 return;
1847 do_fpu_disabled:
1848 gen_save_cpu_state(ctx, true);
1849 if (ctx->envflags & DELAY_SLOT_MASK) {
1850 gen_helper_raise_slot_fpu_disable(cpu_env);
1851 } else {
1852 gen_helper_raise_fpu_disable(cpu_env);
1853 }
1854 ctx->base.is_jmp = DISAS_NORETURN;
1855 return;
1856 }
1858 static void decode_opc(DisasContext * ctx)
1859 {
1860 uint32_t old_flags = ctx->envflags;
1862 _decode_opc(ctx);
1864 if (old_flags & DELAY_SLOT_MASK) {
1865 /* go out of the delay slot */
1866 ctx->envflags &= ~DELAY_SLOT_MASK;
1868 /* When in an exclusive region, we must continue to the end
1869 for conditional branches. */
1870 if (ctx->tbflags & GUSA_EXCLUSIVE
1871 && old_flags & DELAY_SLOT_CONDITIONAL) {
1872 gen_delayed_conditional_jump(ctx);
1873 return;
1874 }
1875 /* Otherwise this is probably an invalid gUSA region.
1876 Drop the GUSA bits so the next TB doesn't see them. */
1877 ctx->envflags &= ~GUSA_MASK;
1879 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1880 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1881 gen_delayed_conditional_jump(ctx);
1882 } else {
1883 gen_jump(ctx);
1884 }
1885 }
1886 }
1888 #ifdef CONFIG_USER_ONLY
1889 /* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1890 Upon an interrupt, a real kernel would simply notice magic values in
1891 the registers and reset the PC to the start of the sequence.
1893 For QEMU, we cannot do this in quite the same way. Instead, we notice
1894 the normal start of such a sequence (mov #-x,r15). While we can handle
1895 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1896 sequences and transform them into atomic operations as seen by the host.
1897 */
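/* Illustrative example (registers, immediates and region length vary with
   the compiler): an atomic increment emitted as a gUSA sequence looks
   roughly like
       mov     #-6, r15        ! enter gUSA region of 6 bytes
       mov.l   @r4, r0         ! load old value
       add     #1, r0          ! operate
       mov.l   r0, @r4         ! store new value
   with r15 restored by the insn at the region's end address.  decode_gusa()
   below pattern-matches such regions and emits a single host atomic
   operation (here, an atomic add) in their place.  */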
1898 static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
1899 {
1900 uint16_t insns[5];
1901 int ld_adr, ld_dst, ld_mop;
1902 int op_dst, op_src, op_opc;
1903 int mv_src, mt_dst, st_src, st_mop;
1904 TCGv op_arg;
1906 uint32_t pc = ctx->base.pc_next;
1907 uint32_t pc_end = ctx->base.tb->cs_base;
1908 int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
1909 int max_insns = (pc_end - pc) / 2;
1910 int i;
1912 if (pc != pc_end + backup || max_insns < 2) {
1913 /* This is a malformed gUSA region. Don't do anything special,
1914 since the interpreter is likely to get confused. */
1915 ctx->envflags &= ~GUSA_MASK;
1916 return 0;
1917 }
1919 if (ctx->tbflags & GUSA_EXCLUSIVE) {
1920 /* Regardless of single-stepping or the end of the page,
1921 we must complete execution of the gUSA region while
1922 holding the exclusive lock. */
1923 *pmax_insns = max_insns;
1924 return 0;
1925 }
1927 /* The state machine below will consume only a few insns.
1928 If there are more than that in a region, fail now. */
1929 if (max_insns > ARRAY_SIZE(insns)) {
1930 goto fail;
1931 }
1933 /* Read all of the insns for the region. */
1934 for (i = 0; i < max_insns; ++i) {
1935 insns[i] = cpu_lduw_code(env, pc + i * 2);
1936 }
1938 ld_adr = ld_dst = ld_mop = -1;
1939 mv_src = -1;
1940 op_dst = op_src = op_opc = -1;
1941 mt_dst = -1;
1942 st_src = st_mop = -1;
1943 op_arg = NULL;
1944 i = 0;
1946 #define NEXT_INSN \
1947 do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
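/* NEXT_INSN latches the next opcode of the region into ctx->opcode, or
   bails out to the generic fallback (fail:) once the region is exhausted.  */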
1949 /*
1950 * Expect a load to begin the region.
1951 */
1952 NEXT_INSN;
1953 switch (ctx->opcode & 0xf00f) {
1954 case 0x6000: /* mov.b @Rm,Rn */
1955 ld_mop = MO_SB;
1956 break;
1957 case 0x6001: /* mov.w @Rm,Rn */
1958 ld_mop = MO_TESW;
1959 break;
1960 case 0x6002: /* mov.l @Rm,Rn */
1961 ld_mop = MO_TESL;
1962 break;
1963 default:
1964 goto fail;
1965 }
1966 ld_adr = B7_4;
1967 ld_dst = B11_8;
1968 if (ld_adr == ld_dst) {
1969 goto fail;
1970 }
1971 /* Unless we see a mov, any two-operand operation must use ld_dst. */
1972 op_dst = ld_dst;
1974 /*
1975 * Expect an optional register move.
1976 */
1977 NEXT_INSN;
1978 switch (ctx->opcode & 0xf00f) {
1979 case 0x6003: /* mov Rm,Rn */
1980 /* Here we want to recognize ld_dst being saved for later consumption,
1981 or for another input register being copied so that ld_dst need not
1982 be clobbered during the operation. */
1983 op_dst = B11_8;
1984 mv_src = B7_4;
1985 if (op_dst == ld_dst) {
1986 /* Overwriting the load output. */
1987 goto fail;
1988 }
1989 if (mv_src != ld_dst) {
1990 /* Copying a new input; constrain op_src to match the load. */
1991 op_src = ld_dst;
1992 }
1993 break;
1995 default:
1996 /* Put back and re-examine as operation. */
1997 --i;
1998 }
2000 /*
2001 * Expect the operation.
2002 */
2003 NEXT_INSN;
2004 switch (ctx->opcode & 0xf00f) {
2005 case 0x300c: /* add Rm,Rn */
2006 op_opc = INDEX_op_add_i32;
2007 goto do_reg_op;
2008 case 0x2009: /* and Rm,Rn */
2009 op_opc = INDEX_op_and_i32;
2010 goto do_reg_op;
2011 case 0x200a: /* xor Rm,Rn */
2012 op_opc = INDEX_op_xor_i32;
2013 goto do_reg_op;
2014 case 0x200b: /* or Rm,Rn */
2015 op_opc = INDEX_op_or_i32;
2016 do_reg_op:
2017 /* The operation register should be as expected, and the
2018 other input cannot depend on the load. */
2019 if (op_dst != B11_8) {
2020 goto fail;
2021 }
2022 if (op_src < 0) {
2023 /* Unconstrained input. */
2024 op_src = B7_4;
2025 } else if (op_src == B7_4) {
2026 /* Constrained input matched load. All operations are
2027 commutative; "swap" them by "moving" the load output
2028 to the (implicit) first argument and the move source
2029 to the (explicit) second argument. */
2030 op_src = mv_src;
2031 } else {
2032 goto fail;
2033 }
2034 op_arg = REG(op_src);
2035 break;
2037 case 0x6007: /* not Rm,Rn */
2038 if (ld_dst != B7_4 || mv_src >= 0) {
2039 goto fail;
2040 }
2041 op_dst = B11_8;
2042 op_opc = INDEX_op_xor_i32;
2043 op_arg = tcg_const_i32(-1);
2044 break;
2046 case 0x7000 ... 0x700f: /* add #imm,Rn */
2047 if (op_dst != B11_8 || mv_src >= 0) {
2048 goto fail;
2049 }
2050 op_opc = INDEX_op_add_i32;
2051 op_arg = tcg_const_i32(B7_0s);
2052 break;
2054 case 0x3000: /* cmp/eq Rm,Rn */
2055 /* Looking for the middle of a compare-and-swap sequence,
2056 beginning with the compare. Operands can be either order,
2057 but with only one overlapping the load. */
2058 if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
2059 goto fail;
2060 }
2061 op_opc = INDEX_op_setcond_i32; /* placeholder */
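/* INDEX_op_setcond_i32 is only a tag here; the emission switch below maps
   this pattern onto an atomic cmpxchg followed by a setcond that updates T
   (and optionally Rn via movt).  */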
2062 op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
2063 op_arg = REG(op_src);
2065 NEXT_INSN;
2066 switch (ctx->opcode & 0xff00) {
2067 case 0x8b00: /* bf label */
2068 case 0x8f00: /* bf/s label */
2069 if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
2070 goto fail;
2071 }
2072 if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2073 break;
2074 }
2075 /* We're looking to unconditionally modify Rn with the
2076 result of the comparison, within the delay slot of
2077 the branch. This is used by older gcc. */
2078 NEXT_INSN;
2079 if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2080 mt_dst = B11_8;
2081 } else {
2082 goto fail;
2083 }
2084 break;
2086 default:
2087 goto fail;
2088 }
2089 break;
2091 case 0x2008: /* tst Rm,Rn */
2092 /* Looking for a compare-and-swap against zero. */
2093 if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2094 goto fail;
2095 }
2096 op_opc = INDEX_op_setcond_i32;
2097 op_arg = tcg_const_i32(0);
2099 NEXT_INSN;
2100 if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2101 || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2102 goto fail;
2103 }
2104 break;
2106 default:
2107 /* Put back and re-examine as store. */
2108 --i;
2109 }
2111 /*
2112 * Expect the store.
2113 */
2114 /* The store must be the last insn. */
2115 if (i != max_insns - 1) {
2116 goto fail;
2117 }
2118 NEXT_INSN;
2119 switch (ctx->opcode & 0xf00f) {
2120 case 0x2000: /* mov.b Rm,@Rn */
2121 st_mop = MO_UB;
2122 break;
2123 case 0x2001: /* mov.w Rm,@Rn */
2124 st_mop = MO_UW;
2125 break;
2126 case 0x2002: /* mov.l Rm,@Rn */
2127 st_mop = MO_UL;
2128 break;
2129 default:
2130 goto fail;
2131 }
2132 /* The store must match the load. */
2133 if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2134 goto fail;
2135 }
2136 st_src = B7_4;
2138 #undef NEXT_INSN
2140 /*
2141 * Emit the operation.
2142 */
2143 tcg_gen_insn_start(pc, ctx->envflags);
2144 switch (op_opc) {
2145 case -1:
2146 /* No operation found. Look for exchange pattern. */
2147 if (st_src == ld_dst || mv_src >= 0) {
2148 goto fail;
2149 }
2150 tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2151 ctx->memidx, ld_mop);
2152 break;
2154 case INDEX_op_add_i32:
2155 if (op_dst != st_src) {
2156 goto fail;
2157 }
2158 if (op_dst == ld_dst && st_mop == MO_UL) {
2159 tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2160 op_arg, ctx->memidx, ld_mop);
2161 } else {
2162 tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2163 op_arg, ctx->memidx, ld_mop);
2164 if (op_dst != ld_dst) {
2165 /* Note that mop sizes < 4 cannot use add_fetch
2166 because it won't carry into the higher bits. */
2167 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2168 }
2169 }
2170 break;
2172 case INDEX_op_and_i32:
2173 if (op_dst != st_src) {
2174 goto fail;
2175 }
2176 if (op_dst == ld_dst) {
2177 tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2178 op_arg, ctx->memidx, ld_mop);
2179 } else {
2180 tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2181 op_arg, ctx->memidx, ld_mop);
2182 tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2183 }
2184 break;
2186 case INDEX_op_or_i32:
2187 if (op_dst != st_src) {
2188 goto fail;
2189 }
2190 if (op_dst == ld_dst) {
2191 tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2192 op_arg, ctx->memidx, ld_mop);
2193 } else {
2194 tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2195 op_arg, ctx->memidx, ld_mop);
2196 tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2197 }
2198 break;
2200 case INDEX_op_xor_i32:
2201 if (op_dst != st_src) {
2202 goto fail;
2203 }
2204 if (op_dst == ld_dst) {
2205 tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2206 op_arg, ctx->memidx, ld_mop);
2207 } else {
2208 tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2209 op_arg, ctx->memidx, ld_mop);
2210 tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2211 }
2212 break;
2214 case INDEX_op_setcond_i32:
2215 if (st_src == ld_dst) {
2216 goto fail;
2217 }
2218 tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2219 REG(st_src), ctx->memidx, ld_mop);
2220 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2221 if (mt_dst >= 0) {
2222 tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2223 }
2224 break;
2226 default:
2227 g_assert_not_reached();
2228 }
2230 /* If op_src is not a valid register, then op_arg was a constant. */
2231 if (op_src < 0 && op_arg) {
2232 tcg_temp_free_i32(op_arg);
2233 }
2235 /* The entire region has been translated. */
2236 ctx->envflags &= ~GUSA_MASK;
2237 ctx->base.pc_next = pc_end;
2238 return max_insns;
2240 fail:
2241 qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2242 pc, pc_end);
2244 /* Restart with the EXCLUSIVE bit set, within a TB run via
2245 cpu_exec_step_atomic holding the exclusive lock. */
2246 tcg_gen_insn_start(pc, ctx->envflags);
2247 ctx->envflags |= GUSA_EXCLUSIVE;
2248 gen_save_cpu_state(ctx, false);
2249 gen_helper_exclusive(cpu_env);
2250 ctx->base.is_jmp = DISAS_NORETURN;
2252 /* We're not executing an instruction, but we must report one for the
2253 purposes of accounting within the TB. We might as well report the
2254 entire region consumed via ctx->base.pc_next so that it's immediately
2255 available in the disassembly dump. */
2256 ctx->base.pc_next = pc_end;
2257 return 1;
2258 }
2259 #endif
2261 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
2262 {
2263 CPUSH4State *env = cs->env_ptr;
2264 DisasContext ctx;
2265 target_ulong pc_start;
2266 int num_insns;
2267 int max_insns;
2269 pc_start = tb->pc;
2270 ctx.base.pc_next = pc_start;
2271 ctx.tbflags = (uint32_t)tb->flags;
2272 ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
2273 ctx.base.is_jmp = DISAS_NEXT;
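/* memidx selects the MMU index: 0 when SR.MD is set (privileged), 1 in
   user mode.  */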
2274 ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2275 /* We don't know if the delayed pc came from a dynamic or static branch,
2276 so assume it is a dynamic branch. */
2277 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
2278 ctx.base.tb = tb;
2279 ctx.base.singlestep_enabled = cs->singlestep_enabled;
2280 ctx.features = env->features;
2281 ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);
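/* gbank is 0x10 only when both SR.MD and SR.RB are set, steering R0..R7
   accesses to their banked copies; fbank likewise selects the FPSCR.FR
   floating-point register bank.  */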
2282 ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) &&
2283 (ctx.tbflags & (1 << SR_RB))) * 0x10;
2284 ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
2286 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
2287 if (max_insns == 0) {
2288 max_insns = CF_COUNT_MASK;
2289 }
2290 max_insns = MIN(max_insns, TCG_MAX_INSNS);
2292 /* Since the ISA is fixed-width, we can bound by the number
2293 of instructions remaining on the page. */
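/* -(pc | TARGET_PAGE_MASK) is the number of bytes from pc to the end of
   the page, and every SH-4 instruction is 2 bytes wide.  */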
2294 num_insns = -(ctx.base.pc_next | TARGET_PAGE_MASK) / 2;
2295 max_insns = MIN(max_insns, num_insns);
2297 /* Single stepping means just that. */
2298 if (ctx.base.singlestep_enabled || singlestep) {
2299 max_insns = 1;
2300 }
2302 gen_tb_start(tb);
2303 num_insns = 0;
2305 #ifdef CONFIG_USER_ONLY
2306 if (ctx.tbflags & GUSA_MASK) {
2307 num_insns = decode_gusa(&ctx, env, &max_insns);
2308 }
2309 #endif
2311 while (ctx.base.is_jmp == DISAS_NEXT
2312 && num_insns < max_insns
2313 && !tcg_op_buf_full()) {
2314 tcg_gen_insn_start(ctx.base.pc_next, ctx.envflags);
2315 num_insns++;
2317 if (unlikely(cpu_breakpoint_test(cs, ctx.base.pc_next, BP_ANY))) {
2318 /* We have hit a breakpoint - make sure PC is up-to-date */
2319 gen_save_cpu_state(&ctx, true);
2320 gen_helper_debug(cpu_env);
2321 ctx.base.is_jmp = DISAS_NORETURN;
2322 /* The address covered by the breakpoint must be included in
2323 [tb->pc, tb->pc + tb->size) in order for it to be
2324 properly cleared -- thus we increment the PC here so that
2325 the logic setting tb->size below does the right thing. */
2326 ctx.base.pc_next += 2;
2327 break;
2328 }
2330 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
2331 gen_io_start();
2332 }
2334 ctx.opcode = cpu_lduw_code(env, ctx.base.pc_next);
2335 decode_opc(&ctx);
2336 ctx.base.pc_next += 2;
2337 }
2338 if (tb_cflags(tb) & CF_LAST_IO) {
2339 gen_io_end();
2340 }
2342 if (ctx.tbflags & GUSA_EXCLUSIVE) {
2343 /* Ending the region of exclusivity. Clear the bits. */
2344 ctx.envflags &= ~GUSA_MASK;
2345 }
2347 switch (ctx.base.is_jmp) {
2348 case DISAS_STOP:
2349 gen_save_cpu_state(&ctx, true);
2350 if (ctx.base.singlestep_enabled) {
2351 gen_helper_debug(cpu_env);
2352 } else {
2353 tcg_gen_exit_tb(0);
2354 }
2355 break;
2356 case DISAS_NEXT:
2357 gen_save_cpu_state(&ctx, false);
2358 gen_goto_tb(&ctx, 0, ctx.base.pc_next);
2359 break;
2360 case DISAS_NORETURN:
2361 break;
2362 default:
2363 g_assert_not_reached();
2364 }
2366 gen_tb_end(tb, num_insns);
2368 tb->size = ctx.base.pc_next - pc_start;
2369 tb->icount = num_insns;
2371 #ifdef DEBUG_DISAS
2372 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
2373 && qemu_log_in_addr_range(pc_start)) {
2374 qemu_log_lock();
2375 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2376 log_target_disas(cs, pc_start, ctx.base.pc_next - pc_start);
2377 qemu_log("\n");
2378 qemu_log_unlock();
2379 }
2380 #endif
2381 }
2383 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
2384 target_ulong *data)
2385 {
2386 env->pc = data[0];
2387 env->flags = data[1];
2388 /* Theoretically delayed_pc should also be restored. In practice the
2389 branch instruction is re-executed after exception, so the delayed
2390 branch target will be recomputed. */
2391 }